mirror of https://github.com/grafana/grafana.git
commit b3ff62a46f: Merge branch 'main' into api-client/headers
@@ -13,6 +13,7 @@ import (
 // API errors that we need to convey after parsing real GH errors (or faking them).
 var (
 	ErrResourceNotFound = errors.New("the resource does not exist")
+	ErrUnauthorized     = errors.New("unauthorized")
 	//lint:ignore ST1005 this is not punctuation
 	ErrServiceUnavailable = apierrors.NewServiceUnavailable("github is unavailable")
 	ErrTooManyItems       = errors.New("maximum number of items exceeded")

@@ -199,6 +199,9 @@ func (r *githubClient) DeleteWebhook(ctx context.Context, owner, repository stri
 	if ghErr.Response.StatusCode == http.StatusNotFound {
 		return ErrResourceNotFound
 	}
+	if ghErr.Response.StatusCode == http.StatusUnauthorized || ghErr.Response.StatusCode == http.StatusForbidden {
+		return ErrUnauthorized
+	}
 	return err
 }
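The two hunks above translate raw go-github error responses into package-level sentinel errors right at the client boundary. A minimal, self-contained sketch of the idea (abbreviated names, not the actual Grafana code) showing how a caller then matches on the domain error with errors.Is instead of inspecting HTTP status codes:

    package main

    import (
        "errors"
        "fmt"
        "net/http"
    )

    var (
        ErrResourceNotFound = errors.New("the resource does not exist")
        ErrUnauthorized     = errors.New("unauthorized")
    )

    // mapStatus mirrors the DeleteWebhook translation above: specific HTTP
    // status codes become sentinel errors, everything else passes through.
    func mapStatus(code int, err error) error {
        switch {
        case code == http.StatusNotFound:
            return ErrResourceNotFound
        case code == http.StatusUnauthorized || code == http.StatusForbidden:
            return ErrUnauthorized
        default:
            return err
        }
    }

    func main() {
        err := mapStatus(http.StatusForbidden, errors.New("raw GH error"))
        fmt.Println(errors.Is(err, ErrUnauthorized)) // true; callers never see HTTP details
    }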
@@ -975,6 +975,27 @@ func TestGithubClient_DeleteWebhook(t *testing.T) {
 			webhookID: 789,
 			wantErr:   ErrServiceUnavailable,
 		},
+		{
+			name: "unauthorized to delete the webhook",
+			mockHandler: mockhub.NewMockedHTTPClient(
+				mockhub.WithRequestMatchHandler(
+					mockhub.DeleteReposHooksByOwnerByRepoByHookId,
+					http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+						w.WriteHeader(http.StatusUnauthorized)
+						require.NoError(t, json.NewEncoder(w).Encode(github.ErrorResponse{
+							Response: &http.Response{
+								StatusCode: http.StatusUnauthorized,
+							},
+							Message: "401 bad credentials",
+						}))
+					}),
+				),
+			),
+			owner:      "test-owner",
+			repository: "test-repo",
+			webhookID:  789,
+			wantErr:    ErrUnauthorized,
+		},
 		{
 			name: "other error",
 			mockHandler: mockhub.NewMockedHTTPClient(

@@ -274,11 +274,15 @@ func (r *githubWebhookRepository) deleteWebhook(ctx context.Context) error {
 	id := r.config.Status.Webhook.ID
 
 	err := r.gh.DeleteWebhook(ctx, r.owner, r.repo, id)
-	if err != nil && !errors.Is(err, ErrResourceNotFound) {
+	if err != nil && !errors.Is(err, ErrResourceNotFound) && !errors.Is(err, ErrUnauthorized) {
 		return fmt.Errorf("delete webhook: %w", err)
 	}
 	if errors.Is(err, ErrResourceNotFound) {
-		logger.Info("webhook does not exist", "url", r.config.Status.Webhook.URL, "id", id)
+		logger.Warn("webhook no longer exists", "url", r.config.Status.Webhook.URL, "id", id)
+		return nil
+	}
+	if errors.Is(err, ErrUnauthorized) {
+		logger.Warn("webhook deletion failed. no longer authorized to delete this webhook", "url", r.config.Status.Webhook.URL, "id", id)
 		return nil
 	}
 
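The repository-level cleanup above makes webhook deletion idempotent: "already gone" and "no longer allowed to touch it" both complete the teardown successfully, with only a warning logged. A compressed sketch of that contract, where the do callback stands in for the real client call:

    package main

    import (
        "errors"
        "fmt"
        "log"
    )

    var (
        ErrResourceNotFound = errors.New("the resource does not exist")
        ErrUnauthorized     = errors.New("unauthorized")
    )

    // deleteWebhook fails hard only on unexpected errors; the two benign
    // outcomes are logged and swallowed so the rest of teardown can proceed.
    func deleteWebhook(do func() error) error {
        err := do()
        if err != nil && !errors.Is(err, ErrResourceNotFound) && !errors.Is(err, ErrUnauthorized) {
            return fmt.Errorf("delete webhook: %w", err)
        }
        if err != nil {
            log.Println("webhook already gone or inaccessible; continuing teardown")
        }
        return nil
    }

    func main() {
        fmt.Println(deleteWebhook(func() error { return ErrUnauthorized })) // <nil>
    }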
@@ -1565,6 +1565,32 @@ func TestGitHubRepository_OnDelete(t *testing.T) {
 			// We don't return an error if the webhook is already gone
 			expectedError: nil,
 		},
+		{
+			name: "unauthorized to delete the webhook",
+			setupMock: func(m *MockClient) {
+				m.On("DeleteWebhook", mock.Anything, "grafana", "grafana", int64(123)).
+					Return(ErrUnauthorized)
+			},
+			config: &provisioning.Repository{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-repo",
+				},
+				Spec: provisioning.RepositorySpec{
+					GitHub: &provisioning.GitHubRepositoryConfig{
+						Branch: "main",
+					},
+				},
+				Status: provisioning.RepositoryStatus{
+					Webhook: &provisioning.WebhookStatus{
+						ID:  123,
+						URL: "https://example.com/webhook",
+					},
+				},
+			},
+			webhookURL: "https://example.com/webhook",
+			// We don't return an error if access to the webhook is revoked
+			expectedError: nil,
+		},
 		{
 			name:      "no webhook URL provided",
 			setupMock: func(_ *MockClient) {},

@@ -75,11 +75,6 @@ func ValidateRepository(repo Repository) field.ErrorList {
 			"The target type is required when sync is enabled"))
 	}
 
-	if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < 10 {
-		list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"),
-			cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", 10)))
-	}
-
 	// Reserved names (for now)
 	reserved := []string{"classic", "sql", "SQL", "plugins", "legacy", "new", "job", "github", "s3", "gcs", "file", "new", "create", "update", "delete"}
 	if slices.Contains(reserved, cfg.Name) {

@@ -74,28 +74,6 @@ func TestValidateRepository(t *testing.T) {
 				require.Contains(t, errors.ToAggregate().Error(), "spec.sync.target: Required value")
 			},
 		},
-		{
-			name: "sync interval too low",
-			repository: func() *MockRepository {
-				m := NewMockRepository(t)
-				m.On("Config").Return(&provisioning.Repository{
-					Spec: provisioning.RepositorySpec{
-						Title: "Test Repo",
-						Sync: provisioning.SyncOptions{
-							Enabled:         true,
-							Target:          "test",
-							IntervalSeconds: 5,
-						},
-					},
-				})
-				m.On("Validate").Return(field.ErrorList{})
-				return m
-			}(),
-			expectedErrs: 1,
-			validateError: func(t *testing.T, errors field.ErrorList) {
-				require.Contains(t, errors.ToAggregate().Error(), "spec.sync.intervalSeconds: Invalid value")
-			},
-		},
 		{
 			name: "reserved name",
 			repository: func() *MockRepository {

@@ -191,11 +169,10 @@ func TestValidateRepository(t *testing.T) {
 				m.On("Validate").Return(field.ErrorList{})
 				return m
 			}(),
-			expectedErrs: 4, // Updated from 3 to 4 to match actual errors:
+			expectedErrs: 3,
 			// 1. missing title
 			// 2. sync target missing
-			// 3. sync interval too low
-			// 4. reserved name
+			// 3. reserved name
 		},
 		{
 			name: "branch workflow for non-github repository",

@@ -447,18 +424,6 @@ func TestFromFieldError(t *testing.T) {
 			expectedType:   metav1.CauseTypeFieldValueRequired,
 			expectedDetail: "a repository title must be given",
 		},
-		{
-			name: "invalid field error",
-			fieldError: &field.Error{
-				Type:   field.ErrorTypeInvalid,
-				Field:  "spec.sync.intervalSeconds",
-				Detail: "Interval must be at least 10 seconds",
-			},
-			expectedCode:   http.StatusBadRequest,
-			expectedField:  "spec.sync.intervalSeconds",
-			expectedType:   metav1.CauseTypeFieldValueInvalid,
-			expectedDetail: "Interval must be at least 10 seconds",
-		},
 		{
 			name: "not supported field error",
 			fieldError: &field.Error{
@@ -2229,3 +2229,8 @@ allowed_targets = instance|folder
 # Whether image rendering is allowed for dashboard previews.
 # Requires image rendering service to be configured.
 allow_image_rendering = true
+
+# The minimum sync interval that can be set for a repository. This is how often the controller
+# will check if there have been any changes to the repository not propagated by a webhook.
+# The minimum value is 10 seconds.
+min_sync_interval = 10s
@@ -223,7 +223,7 @@ Team provisioning requires `group_sync_enabled = true` in the SCIM configuration
 {{< /admonition >}}
 
 {{< admonition type="warning" >}}
-Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider.
+Teams provisioned through SCIM cannot be deleted manually from Grafana - they can only be deleted by removing their corresponding groups from the identity provider. Optionally, you can disable SCIM group sync to allow manual deletion of teams.
 {{< /admonition >}}
 
 For detailed configuration steps specific to the identity provider, see:
@@ -598,7 +598,7 @@
     "auth-validator",
     "config-loader",
     "config-writer",
-    "metrics-collector",
+    "metrics-collector-last-span",
     "log-writer",
     "log-reader",
     "event-publisher",
@@ -2,11 +2,6 @@ import { test, expect } from '@grafana/plugin-e2e';
 
 import longTraceResponse from '../fixtures/long-trace-response.json';
 
-// this test requires a larger viewport
-test.use({
-  viewport: { width: 1280, height: 1080 },
-});
-
 test.describe(
   'Trace view',
   {

@@ -33,7 +28,7 @@ test.describe(
     await datasourceList.getByText('gdev-jaeger').click();
 
     // Check that gdev-jaeger is visible in the query editor
-    await expect(page.getByText('gdev-jaeger')).toBeVisible();
+    await expect(page.getByTestId('query-editor-row').getByText('(gdev-jaeger)')).toBeVisible();
 
     // Type the query
     const queryField = page

@@ -44,14 +39,22 @@ test.describe(
     // Use Shift+Enter to execute the query
     await queryField.press('Shift+Enter');
 
-    // Get the initial count of span bars
-    const initialSpanBars = page.getByTestId(selectors.components.TraceViewer.spanBar);
-    const initialSpanBarCount = await initialSpanBars.count();
-
-    await initialSpanBars.last().scrollIntoViewIfNeeded();
-    await expect
-      .poll(async () => await page.getByTestId(selectors.components.TraceViewer.spanBar).count())
-      .toBeGreaterThan(initialSpanBarCount);
+    // Wait for the trace viewer to be ready
+    await expect(page.getByRole('switch', { name: /api\-gateway GET/ })).toBeVisible();
+
+    // Note the scrolling element is actually the first child of the scroll view, but we can use the scroll wheel on this anyway
+    const scrollEl = page.getByTestId(selectors.pages.Explore.General.scrollView);
+
+    // Assert that the last span is not visible in the page - it should be lazily rendered as the user scrolls
+    const lastSpan = page.getByRole('switch', { name: /metrics\-collector\-last\-span GET/ });
+    await expect(lastSpan).not.toBeVisible();
+
+    // Scroll until the "metrics-collector-last-span GET" switch is visible
+    await expect(async () => {
+      await scrollEl.hover();
+      await page.mouse.wheel(0, 1000);
+      await expect(lastSpan).toBeVisible({ timeout: 1 });
+    }).toPass();
   });
 }
 );
@@ -885,6 +885,11 @@ export interface FeatureToggles {
   */
  alertingJiraIntegration?: boolean;
  /**
+  *
+  * @default true
+  */
+ alertingUseNewSimplifiedRoutingHashAlgorithm?: boolean;
+ /**
   * Use the scopes navigation endpoint instead of the dashboardbindings endpoint
   */
  useScopesNavigationEndpoint?: boolean;
@@ -23,9 +23,9 @@ const qualifiedNameFmt string = "^(" + qnameCharFmt + qnameExtCharFmt + "*)?" +
 const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
 
 const alphaCharFmt string = "[A-Za-z]"
-const resourceCharFmt string = "[A-Za-z0-9]" // alpha numeric
+const resourceCharFmt string = "[A-Za-z0-9-]" // alpha numeric plus dashes
 const resourceFmt string = "^" + alphaCharFmt + resourceCharFmt + "*$"
-const resourceErrMsg string = "must consist of alphanumeric characters"
+const resourceErrMsg string = "must consist of alphanumeric characters and dashes, and must start with an alphabetic character"
 
 var (
 	grafanaNameRegexp = regexp.MustCompile(grafanaNameFmt).MatchString
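The loosened rule is ^[A-Za-z][A-Za-z0-9-]*$: a leading letter, then any mix of letters, digits, and dashes. One consequence, visible in the test hunk below, is that a trailing dash is accepted. A quick standalone check:

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same pattern the hunk above builds from alphaCharFmt + resourceCharFmt.
    var resourceRe = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9-]*$`)

    func main() {
        for _, name := range []string{"folders123", "hello-world", "hello-world-", "_bad_input", "hello world"} {
            fmt.Printf("%-16q %v\n", name, resourceRe.MatchString(name))
        }
        // The first three match; the underscore-led and space-containing names do not.
    }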
@@ -198,16 +198,17 @@ func TestValidation(t *testing.T) {
 			"folders",
 			"folders123",
 			"aaa",
+			"hello-world",
+			"hello-world-",
 		},
 	}, {
 		name: "bad input",
 		expect: []string{
-			"resource must consist of alphanumeric characters (e.g. 'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9]*$')",
+			"resource must consist of alphanumeric characters and dashes, and must start with an alphabetic character (e.g. 'dashboards', or 'folders', regex used for validation is '^[A-Za-z][A-Za-z0-9-]*$')",
 		},
 		input: []string{
 			"_bad_input",
 			"hello world",
-			"hello-world",
 			"hello!",
 			"hello~",
 			"hello ",
@@ -120,8 +120,8 @@ func (f *finalizer) processExistingItems(
 			Group:    item.Group,
 			Resource: item.Resource,
 		})
-		logger.Error("error getting client for resource", "resource", item.Resource, "error", err)
 		if err != nil {
+			logger.Error("error getting client for resource", "resource", item.Resource, "error", err)
 			return count, err
 		}
 
@@ -92,6 +92,7 @@ type APIBuilder struct {
 
 	allowedTargets      []provisioning.SyncTargetType
 	allowImageRendering bool
+	minSyncInterval     time.Duration
 
 	features   featuremgmt.FeatureToggles
 	usageStats usagestats.Service

@@ -144,6 +145,7 @@ func NewAPIBuilder(
 	allowedTargets []provisioning.SyncTargetType,
 	restConfigGetter func(context.Context) (*clientrest.Config, error),
 	allowImageRendering bool,
+	minSyncInterval time.Duration,
 	registry prometheus.Registerer,
 	newStandaloneClientFactoryFunc func(loopbackConfigProvider apiserver.RestConfigProvider) resources.ClientFactory, // optional, only used for standalone apiserver
 ) *APIBuilder {
@@ -156,6 +158,11 @@ func NewAPIBuilder(
 	parsers := resources.NewParserFactory(clients)
 	resourceLister := resources.NewResourceListerForMigrations(unified, legacyMigrator, storageStatus)
 
+	// do not allow the min sync interval to be less than 10 seconds
+	if minSyncInterval <= 10*time.Second {
+		minSyncInterval = 10 * time.Second
+	}
+
 	b := &APIBuilder{
 		onlyApiServer: onlyApiServer,
 		tracer:        tracer,
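The guard silently raises any configured value at or below 10 seconds to the 10-second floor, which also turns an unset (zero) duration into the default. A small sketch of the effect; the helper name is mine:

    package main

    import (
        "fmt"
        "time"
    )

    // floorInterval mirrors the clamp above.
    func floorInterval(v time.Duration) time.Duration {
        if v <= 10*time.Second {
            return 10 * time.Second
        }
        return v
    }

    func main() {
        fmt.Println(floorInterval(0))               // 10s: unset config lands on the default
        fmt.Println(floorInterval(5 * time.Second)) // 10s: too low, raised to the floor
        fmt.Println(floorInterval(time.Minute))     // 1m0s: kept as configured
    }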
@@ -175,6 +182,7 @@ func NewAPIBuilder(
 		allowedTargets:      allowedTargets,
 		restConfigGetter:    restConfigGetter,
 		allowImageRendering: allowImageRendering,
+		minSyncInterval:     minSyncInterval,
 		registry:            registry,
 	}
 

@@ -261,6 +269,7 @@ func RegisterAPIService(
 		allowedTargets,
 		nil, // will use loopback instead
 		cfg.ProvisioningAllowImageRendering,
+		cfg.ProvisioningMinSyncInterval,
 		reg,
 		nil,
 	)
@@ -587,6 +596,11 @@ func (b *APIBuilder) Validate(ctx context.Context, a admission.Attributes, o adm
 			"sync target is not supported"))
 	}
 
+	if cfg.Spec.Sync.Enabled && cfg.Spec.Sync.IntervalSeconds < int64(b.minSyncInterval.Seconds()) {
+		list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"),
+			cfg.Spec.Sync.IntervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", int64(b.minSyncInterval.Seconds()))))
+	}
+
 	if !b.allowImageRendering && cfg.Spec.GitHub != nil && cfg.Spec.GitHub.GenerateDashboardPreviews {
 		list = append(list,
 			field.Invalid(field.NewPath("spec", "generateDashboardPreviews"),
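With the limit now held on the builder, the admission check reports a standard field.Invalid error, so clients get the familiar aggregate message. A trimmed, runnable sketch using the same apimachinery helpers (the function name is mine; the real check lives in APIBuilder.Validate):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    // validateInterval reproduces the check above against an arbitrary floor.
    func validateInterval(intervalSeconds int64, minSyncInterval time.Duration) field.ErrorList {
        var list field.ErrorList
        minSecs := int64(minSyncInterval.Seconds())
        if intervalSeconds < minSecs {
            list = append(list, field.Invalid(field.NewPath("spec", "sync", "intervalSeconds"),
                intervalSeconds, fmt.Sprintf("Interval must be at least %d seconds", minSecs)))
        }
        return list
    }

    func main() {
        errs := validateInterval(5, 30*time.Second)
        fmt.Println(errs.ToAggregate())
        // spec.sync.intervalSeconds: Invalid value: 5: Interval must be at least 30 seconds
    }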
@@ -0,0 +1,114 @@
+package provisioning
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/authentication/user"
+
+	"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
+	"github.com/grafana/grafana/apps/provisioning/pkg/repository"
+
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAPIBuilderValidate(t *testing.T) {
+	factory := repository.NewMockFactory(t)
+	mockRepo := repository.NewMockConfigRepository(t)
+	mockRepo.EXPECT().Validate().Return(nil)
+	factory.EXPECT().Build(mock.Anything, mock.Anything).Return(mockRepo, nil)
+	b := &APIBuilder{
+		repoFactory:         factory,
+		allowedTargets:      []v0alpha1.SyncTargetType{v0alpha1.SyncTargetTypeFolder},
+		allowImageRendering: false,
+		minSyncInterval:     30 * time.Second,
+	}
+
+	t.Run("min sync interval is less than 10 seconds", func(t *testing.T) {
+		cfg := &v0alpha1.Repository{
+			Spec: v0alpha1.RepositorySpec{
+				Title: "repo",
+				Type:  v0alpha1.GitHubRepositoryType,
+				Sync:  v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeFolder, IntervalSeconds: 5},
+			},
+		}
+		mockRepo.EXPECT().Config().Return(cfg)
+
+		obj := newRepoObj("repo1", "default", cfg.Spec, v0alpha1.RepositoryStatus{})
+		err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil)
+		require.Error(t, err)
+		require.True(t, apierrors.IsInvalid(err))
+	})
+
+	t.Run("image rendering is not enabled", func(t *testing.T) {
+		cfg2 := &v0alpha1.Repository{
+			Spec: v0alpha1.RepositorySpec{
+				Title:  "repo",
+				Type:   v0alpha1.GitHubRepositoryType,
+				Sync:   v0alpha1.SyncOptions{Enabled: false, Target: v0alpha1.SyncTargetTypeFolder},
+				GitHub: &v0alpha1.GitHubRepositoryConfig{URL: "https://github.com/acme/repo", Branch: "main", GenerateDashboardPreviews: true},
+			},
+		}
+		mockRepo.EXPECT().Config().Return(cfg2)
+
+		obj := newRepoObj("repo2", "default", cfg2.Spec, v0alpha1.RepositoryStatus{})
+		err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil)
+		require.Error(t, err)
+		require.True(t, apierrors.IsInvalid(err))
+	})
+
+	t.Run("sync target is not supported", func(t *testing.T) {
+		cfg3 := &v0alpha1.Repository{
+			Spec: v0alpha1.RepositorySpec{
+				Title: "repo",
+				Type:  v0alpha1.GitHubRepositoryType,
+				Sync:  v0alpha1.SyncOptions{Enabled: true, Target: v0alpha1.SyncTargetTypeInstance},
+			},
+		}
+		mockRepo.EXPECT().Config().Return(cfg3)
+
+		obj := newRepoObj("repo3", "default", cfg3.Spec, v0alpha1.RepositoryStatus{})
+		err := b.Validate(context.Background(), newAttributes(obj, nil, admission.Create), nil)
+		require.Error(t, err)
+		require.True(t, apierrors.IsInvalid(err))
+	})
+}
+
+func newRepoObj(name string, ns string, spec v0alpha1.RepositorySpec, status v0alpha1.RepositoryStatus) *v0alpha1.Repository {
+	return &v0alpha1.Repository{
+		TypeMeta:   metav1.TypeMeta{APIVersion: v0alpha1.APIVERSION, Kind: "Repository"},
+		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
+		Spec:       spec,
+		Status:     status,
+	}
+}
+
+func newAttributes(obj, old runtime.Object, op admission.Operation) admission.Attributes {
+	return admission.NewAttributesRecord(
+		obj,
+		old,
+		v0alpha1.RepositoryResourceInfo.GroupVersionKind(),
+		"default",
+		func() string {
+			if obj != nil {
+				return obj.(*v0alpha1.Repository).Name
+			}
+			if old != nil {
+				return old.(*v0alpha1.Repository).Name
+			}
+			return ""
+		}(),
+		v0alpha1.RepositoryResourceInfo.GroupVersionResource(),
+		"",
+		op,
+		nil,
+		false,
+		&user.DefaultInfo{},
+	)
+}
@@ -187,7 +187,7 @@ func (s *ModuleServer) Run() error {
 		if err != nil {
 			return nil, err
 		}
-		return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig)
+		return sql.ProvideUnifiedStorageGrpcService(s.cfg, s.features, nil, s.log, s.registerer, docBuilders, s.storageMetrics, s.indexMetrics, s.searchServerRing, s.MemberlistKVConfig, s.httpServerRouter)
 	})
 
 	m.RegisterModule(modules.ZanzanaServer, func() (services.Service, error) {
@@ -1520,6 +1520,16 @@ var (
 			FrontendOnly: true,
 			HideFromDocs: true,
 		},
+		{
+			Name:              "alertingUseNewSimplifiedRoutingHashAlgorithm",
+			Description:       "",
+			Stage:             FeatureStagePublicPreview,
+			Owner:             grafanaAlertingSquad,
+			HideFromAdminPage: true,
+			HideFromDocs:      true,
+			RequiresRestart:   true,
+			Expression:        "true",
+		},
 		{
 			Name:        "useScopesNavigationEndpoint",
 			Description: "Use the scopes navigation endpoint instead of the dashboardbindings endpoint",
@@ -198,6 +198,7 @@ fetchRulesUsingPost,experimental,@grafana/alerting-squad,false,false,false
 newLogsPanel,experimental,@grafana/observability-logs,false,false,true
 grafanaconThemes,GA,@grafana/grafana-frontend-platform,false,true,false
 alertingJiraIntegration,experimental,@grafana/alerting-squad,false,false,true
+alertingUseNewSimplifiedRoutingHashAlgorithm,preview,@grafana/alerting-squad,false,true,false
 useScopesNavigationEndpoint,experimental,@grafana/grafana-frontend-platform,false,false,true
 scopeSearchAllLevels,experimental,@grafana/grafana-frontend-platform,false,false,false
 alertingRuleVersionHistoryRestore,GA,@grafana/alerting-squad,false,false,true
@@ -803,6 +803,9 @@ const (
 	// Enables the new Jira integration for contact points in cloud alert managers.
 	FlagAlertingJiraIntegration = "alertingJiraIntegration"
 
+	// FlagAlertingUseNewSimplifiedRoutingHashAlgorithm
+	FlagAlertingUseNewSimplifiedRoutingHashAlgorithm = "alertingUseNewSimplifiedRoutingHashAlgorithm"
+
 	// FlagUseScopesNavigationEndpoint
 	// Use the scopes navigation endpoint instead of the dashboardbindings endpoint
 	FlagUseScopesNavigationEndpoint = "useScopesNavigationEndpoint"
@@ -610,6 +610,46 @@
       "expression": "true"
     }
   },
+  {
+    "metadata": {
+      "name": "alertingUseNewSimplifiedRoutingHashAlgorithm",
+      "resourceVersion": "1759339813575",
+      "creationTimestamp": "2025-10-01T17:28:42Z",
+      "deletionTimestamp": "2025-10-01T17:29:29Z",
+      "annotations": {
+        "grafana.app/updatedTimestamp": "2025-10-01 17:30:13.575464 +0000 UTC"
+      }
+    },
+    "spec": {
+      "description": "",
+      "stage": "preview",
+      "codeowner": "@grafana/alerting-squad",
+      "requiresRestart": true,
+      "hideFromAdminPage": true,
+      "hideFromDocs": true,
+      "expression": "true"
+    }
+  },
+  {
+    "metadata": {
+      "name": "alertingUseOldSimplifiedRoutingHashAlgorithm",
+      "resourceVersion": "1759339782639",
+      "creationTimestamp": "2025-10-01T17:29:29Z",
+      "deletionTimestamp": "2025-10-01T17:30:13Z",
+      "annotations": {
+        "grafana.app/updatedTimestamp": "2025-10-01 17:29:42.63941 +0000 UTC"
+      }
+    },
+    "spec": {
+      "description": "",
+      "stage": "deprecated",
+      "codeowner": "@grafana/alerting-squad",
+      "requiresRestart": true,
+      "hideFromAdminPage": true,
+      "hideFromDocs": true,
+      "expression": "false"
+    }
+  },
   {
     "metadata": {
       "name": "alertmanagerRemotePrimary",
@@ -291,7 +291,8 @@ func TestAlertmanagerAutogenConfig(t *testing.T) {
 		1: {AlertmanagerConfiguration: validConfig, OrgID: 1},
 		2: {AlertmanagerConfiguration: validConfigWithoutAutogen, OrgID: 2},
 	}
-	sut.mam = createMultiOrgAlertmanager(t, configs)
+	ft := featuremgmt.WithFeatures(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm)
+	sut.mam = createMultiOrgAlertmanager(t, configs, withAMFeatureToggles(ft))
 	return sut, configs
 }
 
@@ -577,9 +578,29 @@ func createSut(t *testing.T) AlertmanagerSrv {
 		}
 	}
 }
 
-func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration) *notifier.MultiOrgAlertmanager {
+type createMultiOrgAMOptions struct {
+	featureToggles featuremgmt.FeatureToggles
+}
+
+type createMultiOrgAMOptionsFunc func(*createMultiOrgAMOptions)
+
+func withAMFeatureToggles(toggles featuremgmt.FeatureToggles) createMultiOrgAMOptionsFunc {
+	return func(opts *createMultiOrgAMOptions) {
+		opts.featureToggles = toggles
+	}
+}
+
+func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertConfiguration, opts ...createMultiOrgAMOptionsFunc) *notifier.MultiOrgAlertmanager {
 	t.Helper()
 
+	options := createMultiOrgAMOptions{
+		featureToggles: featuremgmt.WithFeatures(),
+	}
+
+	for _, opt := range opts {
+		opt(&options)
+	}
+
 	configStore := notifier.NewFakeConfigStore(t, configs)
 	orgStore := notifier.NewFakeOrgStore(t, []int64{1, 2, 3})
 	provStore := ngfakes.NewFakeProvisioningStore()
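The helper rewrite above is a functional-options refactor: existing createMultiOrgAlertmanager(t, configs) call sites keep compiling unchanged, while the one test that needs feature toggles opts in. The shape of the pattern in miniature (illustrative names only):

    package main

    import "fmt"

    type options struct{ toggles []string }

    type option func(*options)

    func withToggles(t ...string) option {
        return func(o *options) { o.toggles = t }
    }

    // newHelper applies defaults first, then lets each option override them.
    func newHelper(opts ...option) options {
        o := options{} // defaults
        for _, opt := range opts {
            opt(&o)
        }
        return o
    }

    func main() {
        fmt.Println(newHelper())                       // zero-argument callers are untouched
        fmt.Println(newHelper(withToggles("newHash"))) // opt-in callers pass options
    }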
@@ -610,7 +631,7 @@ func createMultiOrgAlertmanager(t *testing.T, configs map[int64]*ngmodels.AlertC
 		ngfakes.NewFakeReceiverPermissionsService(),
 		log.New("testlogger"),
 		secretsService,
-		featuremgmt.WithManager(),
+		options.featureToggles,
 		nil,
 	)
 	require.NoError(t, err)
@@ -113,7 +113,7 @@ func (srv TestingApiSrv) RouteTestGrafanaRuleConfig(c *contextmodel.ReqContext,
 		now,
 		rule,
 		results,
-		state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder),
+		state.GetRuleExtraLabels(log.New("testing"), rule, folder.Fullpath, includeFolder, srv.featureManager),
 		nil,
 	)
 

@@ -8,6 +8,7 @@ import (
 	"unsafe"
 
 	"github.com/grafana/grafana-plugin-sdk-go/data"
+	"github.com/grafana/grafana/pkg/services/featuremgmt"
 	"github.com/prometheus/common/model"
 )
 
|
@ -102,12 +103,12 @@ func (s *NotificationSettings) Validate() error {
|
||||||
// - AutogeneratedRouteLabel: "true"
|
// - AutogeneratedRouteLabel: "true"
|
||||||
// - AutogeneratedRouteReceiverNameLabel: Receiver
|
// - AutogeneratedRouteReceiverNameLabel: Receiver
|
||||||
// - AutogeneratedRouteSettingsHashLabel: Fingerprint (if the NotificationSettings are not all default)
|
// - AutogeneratedRouteSettingsHashLabel: Fingerprint (if the NotificationSettings are not all default)
|
||||||
func (s *NotificationSettings) ToLabels() data.Labels {
|
func (s *NotificationSettings) ToLabels(features featuremgmt.FeatureToggles) data.Labels {
|
||||||
result := make(data.Labels, 3)
|
result := make(data.Labels, 3)
|
||||||
result[AutogeneratedRouteLabel] = "true"
|
result[AutogeneratedRouteLabel] = "true"
|
||||||
result[AutogeneratedRouteReceiverNameLabel] = s.Receiver
|
result[AutogeneratedRouteReceiverNameLabel] = s.Receiver
|
||||||
if !s.IsAllDefault() {
|
if !s.IsAllDefault() {
|
||||||
result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint().String()
|
result[AutogeneratedRouteSettingsHashLabel] = s.Fingerprint(features).String()
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
@ -160,7 +161,7 @@ func NewDefaultNotificationSettings(receiver string) NotificationSettings {
|
||||||
// Fingerprint calculates a hash value to uniquely identify a NotificationSettings by its attributes.
|
// Fingerprint calculates a hash value to uniquely identify a NotificationSettings by its attributes.
|
||||||
// The hash is calculated by concatenating the strings and durations of the NotificationSettings attributes
|
// The hash is calculated by concatenating the strings and durations of the NotificationSettings attributes
|
||||||
// and using an invalid UTF-8 sequence as a separator.
|
// and using an invalid UTF-8 sequence as a separator.
|
||||||
func (s *NotificationSettings) Fingerprint() data.Fingerprint {
|
func (s *NotificationSettings) Fingerprint(features featuremgmt.FeatureToggles) data.Fingerprint {
|
||||||
h := fnv.New64()
|
h := fnv.New64()
|
||||||
tmp := make([]byte, 8)
|
tmp := make([]byte, 8)
|
||||||
|
|
||||||
|
@@ -192,7 +193,10 @@ func (s *NotificationSettings) Fingerprint() data.Fingerprint {
 	}
 	// Add a separator between the time intervals to avoid collisions
 	// when all settings are the same including interval names except for the interval type (mute vs active).
-	_, _ = h.Write([]byte{255})
+	// Use new algorithm by default, unless feature flag is explicitly disabled
+	if features == nil || (features != nil && features.IsEnabledGlobally(featuremgmt.FlagAlertingUseNewSimplifiedRoutingHashAlgorithm)) {
+		_, _ = h.Write([]byte{255})
+	}
 	for _, interval := range s.ActiveTimeIntervals {
 		writeString(interval)
 	}
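The separator byte exists because concatenating the mute and active interval lists is ambiguous: the same interval name hashed as (mute: "x", active: none) and (mute: none, active: "x") otherwise produces identical byte streams, and 0xFF can never occur inside valid UTF-8 interval names. Gating it behind the flag lets operators reproduce the old fingerprints during a rollback. A toy reproduction of the collision, loosely modeled on the real Fingerprint rather than copied from it:

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    func fingerprint(mute, active []string, withSep bool) uint64 {
        h := fnv.New64()
        for _, s := range mute {
            _, _ = h.Write([]byte(s))
        }
        if withSep {
            _, _ = h.Write([]byte{255}) // invalid UTF-8, cannot collide with names
        }
        for _, s := range active {
            _, _ = h.Write([]byte(s))
        }
        return h.Sum64()
    }

    func main() {
        // Identical settings except for the interval type collide without a separator...
        fmt.Println(fingerprint([]string{"x"}, nil, false) == fingerprint(nil, []string{"x"}, false)) // true
        // ...and become distinct once the separator is written between the lists.
        fmt.Println(fingerprint([]string{"x"}, nil, true) == fingerprint(nil, []string{"x"}, true)) // false
    }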
@@ -195,7 +195,7 @@ func TestNotificationSettingsLabels(t *testing.T) {
 
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
-			labels := tt.notificationSettings.ToLabels()
+			labels := tt.notificationSettings.ToLabels(nil)
 			require.Equal(t, tt.labels, labels)
 		})
 	}

@@ -219,7 +219,7 @@ func TestNotificationSettings_TimeIntervals(t *testing.T) {
 		ActiveTimeIntervals: []string{timeInterval},
 	}
 
-	require.NotEqual(t, activeSettings.Fingerprint(), muteSettings.Fingerprint())
+	require.NotEqual(t, activeSettings.Fingerprint(nil), muteSettings.Fingerprint(nil))
 }
 
 func TestNormalizedGroupBy(t *testing.T) {

@@ -220,7 +220,7 @@ func (ng *AlertNG) init() error {
 			Timeout: ng.Cfg.UnifiedAlerting.RemoteAlertmanager.Timeout,
 		}
 		autogenFn := func(ctx context.Context, logger log.Logger, orgID int64, cfg *definitions.PostableApiAlertingConfig, skipInvalid bool) error {
-			return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid)
+			return notifier.AddAutogenConfig(ctx, logger, ng.store, orgID, cfg, skipInvalid, ng.FeatureToggles)
 		}
 
 		// This function will be used by the MOA to create new Alertmanagers.

@@ -56,6 +56,7 @@ type alertmanager struct {
 	DefaultConfiguration string
 	decryptFn            alertingNotify.GetDecryptedValueFn
 	crypto               Crypto
+	features             featuremgmt.FeatureToggles
 }
 
 // maintenanceOptions represent the options for components that need maintenance on a frequency within the Alertmanager.

@@ -155,6 +156,7 @@ func NewAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A
 		logger:    l.New("component", "alertmanager", opts.TenantKey, opts.TenantID), // similar to what the base does
 		decryptFn: decryptFn,
 		crypto:    crypto,
+		features:  featureToggles,
 	}
 
 	return am, nil
|
@ -344,7 +346,7 @@ func (am *alertmanager) applyConfig(ctx context.Context, cfg *apimodels.Postable
|
||||||
templates := alertingNotify.PostableAPITemplatesToTemplateDefinitions(cfg.GetMergedTemplateDefinitions())
|
templates := alertingNotify.PostableAPITemplatesToTemplateDefinitions(cfg.GetMergedTemplateDefinitions())
|
||||||
|
|
||||||
// Now add autogenerated config to the route.
|
// Now add autogenerated config to the route.
|
||||||
err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid)
|
err = AddAutogenConfig(ctx, am.logger, am.Store, am.Base.TenantID(), &amConfig, skipInvalid, am.features)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -133,7 +133,7 @@ func (moa *MultiOrgAlertmanager) GetAlertmanagerConfiguration(ctx context.Contex
|
||||||
// Otherwise, broken settings (e.g. a receiver that doesn't exist) will cause the config returned here to be
|
// Otherwise, broken settings (e.g. a receiver that doesn't exist) will cause the config returned here to be
|
||||||
// different than the config currently in-use.
|
// different than the config currently in-use.
|
||||||
// TODO: Preferably, we'd be getting the config directly from the in-memory AM so adding the autogen config would not be necessary.
|
// TODO: Preferably, we'd be getting the config directly from the in-memory AM so adding the autogen config would not be necessary.
|
||||||
err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true)
|
err := AddAutogenConfig(ctx, moa.logger, moa.configStore, org, &cfg.AlertmanagerConfig, true, moa.featureManager)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return definitions.GettableUserConfig{}, err
|
return definitions.GettableUserConfig{}, err
|
||||||
}
|
}
|
||||||
|
|
|
@@ -12,6 +12,7 @@ import (
 	"golang.org/x/exp/maps"
 
 	"github.com/grafana/grafana/pkg/infra/log"
+	"github.com/grafana/grafana/pkg/services/featuremgmt"
 	"github.com/grafana/grafana/pkg/services/ngalert/api/tooling/definitions"
 	"github.com/grafana/grafana/pkg/services/ngalert/models"
 )

@@ -22,8 +23,8 @@ type autogenRuleStore interface {
 
 // AddAutogenConfig creates the autogenerated configuration and adds it to the given apiAlertingConfig.
 // If skipInvalid is true, then invalid notification settings are skipped, otherwise an error is returned.
-func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) error {
-	autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid)
+func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) error {
+	autogenRoute, err := newAutogeneratedRoute(ctx, logger, store, orgId, cfg, skipInvalid, features)
 	if err != nil {
 		return err
 	}

@@ -39,7 +40,7 @@ func AddAutogenConfig[R receiver](ctx context.Context, logger log.Logger, store
 // newAutogeneratedRoute creates a new autogenerated route based on the notification settings for the given org.
 // cfg is used to construct the settings validator and to ensure we create a dedicated route for each receiver.
 // skipInvalid is used to skip invalid settings instead of returning an error.
-func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool) (autogeneratedRoute, error) {
+func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, store autogenRuleStore, orgId int64, cfg apiAlertingConfig[R], skipInvalid bool, features featuremgmt.FeatureToggles) (autogeneratedRoute, error) {
 	settings, err := store.ListNotificationSettings(ctx, models.ListNotificationSettingsQuery{OrgID: orgId})
 	if err != nil {
 		return autogeneratedRoute{}, fmt.Errorf("failed to list alert rules: %w", err)

@@ -50,7 +51,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s
 	// contact point even if no rules are using it. This will prevent race conditions between AM sync and rule sync.
 	for _, receiver := range cfg.GetReceivers() {
 		setting := models.NewDefaultNotificationSettings(receiver.GetName())
-		fp := setting.Fingerprint()
+		fp := setting.Fingerprint(features)
 		notificationSettings[fp] = setting
 	}
 

@@ -65,7 +66,7 @@ func newAutogeneratedRoute[R receiver](ctx context.Context, logger log.Logger, s
 			}
 			return autogeneratedRoute{}, fmt.Errorf("invalid notification settings for rule %s: %w", ruleKey.UID, err)
 		}
-		fp := setting.Fingerprint()
+		fp := setting.Fingerprint(features)
 		// Keep only unique settings.
 		if _, ok := notificationSettings[fp]; ok {
 			continue
@@ -290,7 +290,7 @@ func TestAddAutogenConfig(t *testing.T) {
 				store.notificationSettings[orgId][models.AlertRuleKey{OrgID: orgId, UID: util.GenerateShortUID()}] = []models.NotificationSettings{setting}
 			}
 
-			err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid)
+			err := AddAutogenConfig(context.Background(), &logtest.Fake{}, store, orgId, tt.existingConfig, tt.skipInvalid, nil)
 			if tt.expErrorContains != "" {
 				require.Error(t, err)
 				require.ErrorContains(t, err, tt.expErrorContains)

@@ -471,7 +471,7 @@ func (a *alertRule) evaluate(ctx context.Context, e *Evaluation, span trace.Span
 		e.scheduledAt,
 		e.rule,
 		results,
-		state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder),
+		state.GetRuleExtraLabels(logger, e.rule, e.folderTitle, !a.disableGrafanaFolder, a.featureToggles),
 		func(ctx context.Context, statesToSend state.StateTransitions) {
 			start := a.clock.Now()
 			alerts := a.send(ctx, logger, statesToSend)

@@ -1317,7 +1317,7 @@ func stateForRule(rule *models.AlertRule, ts time.Time, evalState eval.State) *s
 	for k, v := range rule.Labels {
 		s.Labels[k] = v
 	}
-	for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true) {
+	for k, v := range state.GetRuleExtraLabels(&logtest.Fake{}, rule, "", true, nil) {
 		if _, ok := s.Labels[k]; !ok {
 			s.Labels[k] = v
 		}

@@ -304,7 +304,7 @@ func (r ruleWithFolder) Fingerprint() fingerprint {
 	}
 
 	for _, setting := range rule.NotificationSettings {
-		binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint()))
+		binary.LittleEndian.PutUint64(tmp, uint64(setting.Fingerprint(nil)))
 		writeBytes(tmp)
 	}
 
@@ -18,6 +18,7 @@ import (
 	"github.com/grafana/grafana/pkg/apimachinery/errutil"
 	"github.com/grafana/grafana/pkg/expr"
 	"github.com/grafana/grafana/pkg/infra/log"
+	"github.com/grafana/grafana/pkg/services/featuremgmt"
 	"github.com/grafana/grafana/pkg/services/ngalert/eval"
 	"github.com/grafana/grafana/pkg/services/ngalert/models"
 	"github.com/grafana/grafana/pkg/services/screenshot"

@@ -753,7 +754,7 @@ func ParseFormattedState(stateStr string) (eval.State, string, error) {
 }
 
 // GetRuleExtraLabels returns a map of built-in labels that should be added to an alert before it is sent to the Alertmanager or its state is cached.
-func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool) map[string]string {
+func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string, includeFolder bool, features featuremgmt.FeatureToggles) map[string]string {
 	extraLabels := make(map[string]string, 4)
 
 	extraLabels[alertingModels.NamespaceUIDLabel] = rule.NamespaceUID

@@ -771,7 +772,7 @@ func GetRuleExtraLabels(l log.Logger, rule *models.AlertRule, folderTitle string
 			ignored, _ := json.Marshal(rule.NotificationSettings[1:])
 			l.Error("Detected multiple notification settings, which is not supported. Only the first will be applied", "ignored_settings", string(ignored))
 		}
-		return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels())
+		return mergeLabels(extraLabels, rule.NotificationSettings[0].ToLabels(features))
 	}
 	return extraLabels
 }

@@ -779,7 +779,7 @@ func TestGetRuleExtraLabels(t *testing.T) {
 				models.RuleUIDLabel:                          rule.UID,
 				ngmodels.AutogeneratedRouteLabel:             "true",
 				ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver,
-				ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(),
+				ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(),
 			},
 		},
 		"ignore_multiple_notifications": {

@@ -794,14 +794,14 @@ func TestGetRuleExtraLabels(t *testing.T) {
 				models.RuleUIDLabel:                          rule.UID,
 				ngmodels.AutogeneratedRouteLabel:             "true",
 				ngmodels.AutogeneratedRouteReceiverNameLabel: ns.Receiver,
-				ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint().String(),
+				ngmodels.AutogeneratedRouteSettingsHashLabel: ns.Fingerprint(nil).String(),
 			},
 		},
 	}
 
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder)
+			result := GetRuleExtraLabels(logger, tc.rule, folderTitle, tc.includeFolder, nil)
 			require.Equal(t, tc.expected, result)
 		})
 	}
@@ -334,6 +334,9 @@ func filterOutSpecialDatasources(dash *DashboardSummaryInfo) {
 		case "-- Dashboard --":
 			// The `Dashboard` datasource refers to the results of the query used in another panel
 			continue
+		case "grafana":
+			// this is the uid for the -- Grafana -- datasource
+			continue
 		default:
 			dsRefs = append(dsRefs, ds)
 		}
@@ -2,24 +2,12 @@
   "id": 250,
   "title": "fast streaming",
   "tags": null,
-  "datasource": [
-    {
-      "uid": "grafana",
-      "type": "datasource"
-    }
-  ],
   "panels": [
     {
       "id": 3,
       "title": "Panel Title",
       "type": "timeseries",
-      "pluginVersion": "7.5.0-pre",
-      "datasource": [
-        {
-          "uid": "grafana",
-          "type": "datasource"
-        }
-      ]
+      "pluginVersion": "7.5.0-pre"
     }
   ],
   "schemaVersion": 27,

@@ -3,10 +3,6 @@
   "title": "special ds",
   "tags": null,
   "datasource": [
-    {
-      "uid": "grafana",
-      "type": "datasource"
-    },
     {
       "uid": "dgd92lq7k",
       "type": "frser-sqlite-datasource"

@@ -22,10 +18,6 @@
       "title": "mixed ds with grafana ds",
       "type": "timeseries",
       "datasource": [
-        {
-          "uid": "grafana",
-          "type": "datasource"
-        },
         {
           "uid": "dgd92lq7k",
           "type": "frser-sqlite-datasource"

@@ -45,13 +37,7 @@
     {
       "id": 6,
       "title": "grafana ds",
-      "type": "timeseries",
-      "datasource": [
-        {
-          "uid": "grafana",
-          "type": "datasource"
-        }
-      ]
+      "type": "timeseries"
     },
     {
       "id": 2,
@@ -431,7 +431,8 @@ func (tapi *TeamAPI) validateTeam(c *contextmodel.ReqContext, teamID int64, prov
 		return response.Error(http.StatusInternalServerError, "Failed to get Team", err)
 	}
 
-	if teamDTO.IsProvisioned {
+	isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false)
+	if isGroupSyncEnabled && teamDTO.IsProvisioned {
 		return response.Error(http.StatusBadRequest, provisionedMessage, err)
 	}
 
@ -299,7 +299,8 @@ func (tapi *TeamAPI) removeTeamMember(c *contextmodel.ReqContext) response.Respo
|
||||||
return response.Error(http.StatusInternalServerError, "Failed to get Team", err)
|
return response.Error(http.StatusInternalServerError, "Failed to get Team", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if existingTeam.IsProvisioned {
|
isGroupSyncEnabled := tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").MustBool(false)
|
||||||
|
if isGroupSyncEnabled && existingTeam.IsProvisioned {
|
||||||
return response.Error(http.StatusBadRequest, "Team memberships cannot be updated for provisioned teams", err)
|
return response.Error(http.StatusBadRequest, "Team memberships cannot be updated for provisioned teams", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -169,13 +169,15 @@ func TestUpdateTeamMembersAPIEndpoint(t *testing.T) {
 	})
 }

-func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) {
+func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsEnabled(t *testing.T) {
 	server := SetupAPITestServer(t, &teamtest.FakeService{
 		ExpectedIsMember: true,
 		ExpectedTeamDTO:  &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true},
+	}, func(tapi *TeamAPI) {
+		tapi.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true")
 	})

-	t.Run("should not be able to update team member from a provisioned team", func(t *testing.T) {
+	t.Run("should not be able to update team member from a provisioned team if team sync is enabled", func(t *testing.T) {
 		req := webtest.RequestWithSignedInUser(
 			server.NewRequest(http.MethodPut, "/api/teams/1/members/1", strings.NewReader("{\"permission\": 1}")),
 			authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}),

@@ -186,7 +188,7 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) {
 		require.NoError(t, res.Body.Close())
 	})

-	t.Run("should not be able to update team member from a provisioned team by team UID", func(t *testing.T) {
+	t.Run("should not be able to update team member from a provisioned team by team UID if team sync is enabled", func(t *testing.T) {
 		req := webtest.RequestWithSignedInUser(
 			server.NewRequest(http.MethodPut, "/api/teams/a00001/members/1", strings.NewReader("{\"permission\": 1}")),
 			authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}),

@@ -198,6 +200,27 @@ func TestUpdateTeamMembersFromProvisionedTeam(t *testing.T) {
 	})
 }

+func TestUpdateTeamMembersFromProvisionedTeamWhenGroupSyncIsDisabled(t *testing.T) {
+	t.Run("should be able to delete team member from a provisioned team when SCIM group sync is disabled", func(t *testing.T) {
+		server := SetupAPITestServer(t, nil, func(hs *TeamAPI) {
+			hs.teamService = &teamtest.FakeService{
+				ExpectedIsMember: true,
+				ExpectedTeamDTO:  &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true},
+			}
+			hs.teamPermissionsService = &actest.FakePermissionsService{}
+		})
+
+		req := webtest.RequestWithSignedInUser(
+			server.NewRequest(http.MethodDelete, "/api/teams/1/members/1", nil),
+			authedUserWithPermissions(1, 1, []accesscontrol.Permission{{Action: accesscontrol.ActionTeamsPermissionsWrite, Scope: "teams:id:1"}}),
+		)
+		res, err := server.SendJSON(req)
+		require.NoError(t, err)
+		assert.Equal(t, http.StatusOK, res.StatusCode)
+		require.NoError(t, res.Body.Close())
+	})
+}
+
 func TestDeleteTeamMembersAPIEndpoint(t *testing.T) {
 	server := SetupAPITestServer(t, nil, func(hs *TeamAPI) {
 		hs.teamService = &teamtest.FakeService{

@@ -236,6 +259,8 @@ func TestDeleteTeamMembersFromProvisionedTeam(t *testing.T) {
 			ExpectedTeamDTO:  &team.TeamDTO{ID: 1, UID: "a00001", IsProvisioned: true},
 		}
 		hs.teamPermissionsService = &actest.FakePermissionsService{}
+	}, func(hs *TeamAPI) {
+		hs.cfg.Raw.Section("auth.scim").Key("group_sync_enabled").SetValue("true")
 	})

 	t.Run("should not be able to delete team member from a provisioned team", func(t *testing.T) {
@@ -138,6 +138,7 @@ type Cfg struct {
 	ProvisioningDisableControllers  bool
 	ProvisioningAllowedTargets      []string
 	ProvisioningAllowImageRendering bool
+	ProvisioningMinSyncInterval     time.Duration
 	ProvisioningRepositoryTypes     []string
 	ProvisioningLokiURL             string
 	ProvisioningLokiUser            string

@@ -2125,6 +2126,7 @@ func (cfg *Cfg) readProvisioningSettings(iniFile *ini.File) error {
 		cfg.ProvisioningAllowedTargets = []string{"instance", "folder"}
 	}
 	cfg.ProvisioningAllowImageRendering = iniFile.Section("provisioning").Key("allow_image_rendering").MustBool(true)
+	cfg.ProvisioningMinSyncInterval = iniFile.Section("provisioning").Key("min_sync_interval").MustDuration(10 * time.Second)

 	// Read job history configuration
 	cfg.ProvisioningLokiURL = valueAsString(iniFile.Section("provisioning"), "loki_url", "")
@@ -293,13 +293,6 @@ func (s *Storage) Delete(
 	if err := preconditions.Check(key, out); err != nil {
 		return err
 	}

-	if preconditions.ResourceVersion != nil {
-		cmd.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64)
-		if err != nil {
-			return err
-		}
-	}
 	if preconditions.UID != nil {
 		cmd.Uid = string(*preconditions.UID)
 	}

@@ -319,6 +312,10 @@ func (s *Storage) Delete(
 		return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_DELETED, key, out, out)
 	}

+	cmd.ResourceVersion, err = meta.GetResourceVersionInt64()
+	if err != nil {
+		return resource.GetError(resource.AsErrorResult(err))
+	}
 	rsp, err := s.store.Delete(ctx, cmd)
 	if err != nil {
 		return resource.GetError(resource.AsErrorResult(err))

@@ -536,6 +533,18 @@ func (s *Storage) GuaranteedUpdate(
 	if err != nil {
 		return err
 	}
+	// NOTE: by default, the RV will **not** be set in the preconditions (it is removed here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L187)
+	// instead, the RV check is done with the object from the request itself.
+	//
+	// the object from the request is retrieved in the tryUpdate function (we use the generic k8s store one). this function calls the UpdateObject function here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L653
+	// and that will run a series of transformations: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/rest/update.go#L219
+	//
+	// the specific transformations it runs depend on what type of update it is.
+	// for patch, the transformers are set here and use the patchBytes from the request: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go#L697
+	// for put, it uses the object from the request here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go#L163
+	//
+	// after those transformations, the RV will then be on the object so that the RV check can properly be done here: https://github.com/kubernetes/kubernetes/blob/v1.34.1/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go#L662
+	// it will be compared to the current object that we pass in below from storage.
 	if preconditions != nil && preconditions.ResourceVersion != nil {
 		req.ResourceVersion, err = strconv.ParseInt(*preconditions.ResourceVersion, 10, 64)
 		if err != nil {
@@ -611,41 +620,45 @@ func (s *Storage) GuaranteedUpdate(
 			}
 			continue
 		}
-		break
-	}

-	v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj)
-	if err != nil {
-		return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_MODIFIED, key, updatedObj, destination)
-	}

-	// Only update (for real) if the bytes have changed
-	var rv uint64
-	req.Value = v.raw.Bytes()
-	if !bytes.Equal(req.Value, existingBytes) {
-		updateResponse, err := s.store.Update(ctx, req)
-		if err != nil {
-			err = resource.GetError(resource.AsErrorResult(err))
-		} else if updateResponse.Error != nil {
-			err = resource.GetError(updateResponse.Error)
-		}

-		// Cleanup secure values
-		if err = v.finish(ctx, err, s.opts.SecureValues); err != nil {
-			return err
-		}

-		rv = uint64(updateResponse.ResourceVersion)
-	}

-	if _, err := s.convertToObject(req.Value, destination); err != nil {
-		return err
-	}

-	if rv > 0 {
-		if err := s.versioner.UpdateObject(destination, rv); err != nil {
-			return err
-		}
-	}
+		v, err := s.prepareObjectForUpdate(ctx, updatedObj, existingObj)
+		if err != nil {
+			return s.handleManagedResourceRouting(ctx, err, resourcepb.WatchEvent_MODIFIED, key, updatedObj, destination)
+		}
+
+		// Only update (for real) if the bytes have changed
+		var rv uint64
+		req.Value = v.raw.Bytes()
+		if !bytes.Equal(req.Value, existingBytes) {
+			req.ResourceVersion = readResponse.ResourceVersion
+			updateResponse, err := s.store.Update(ctx, req)
+			if err != nil {
+				err = resource.GetError(resource.AsErrorResult(err))
+			} else if updateResponse.Error != nil {
+				if attempt < MaxUpdateAttempts && updateResponse.Error.Code == http.StatusConflict {
+					continue // try the read again
+				}
+				err = resource.GetError(updateResponse.Error)
+			}
+
+			// Cleanup secure values
+			if err = v.finish(ctx, err, s.opts.SecureValues); err != nil {
+				return err
+			}
+
+			rv = uint64(updateResponse.ResourceVersion)
+		}
+
+		if _, err := s.convertToObject(req.Value, destination); err != nil {
+			return err
+		}
+
+		if rv > 0 {
+			if err := s.versioner.UpdateObject(destination, rv); err != nil {
+				return err
+			}
+		}
+		return nil
 	}

 	return nil
@@ -18,8 +18,7 @@ import (

 // Package-level errors.
 var (
-	ErrOptimisticLockingFailed = errors.New("optimistic locking failed")
 	ErrNotImplementedYet = errors.New("not implemented yet")
 )

 var (

@@ -31,6 +30,12 @@ var (
 			Code: http.StatusConflict,
 		},
 	}

+	ErrOptimisticLockingFailed = resourcepb.ErrorResult{
+		Code:    http.StatusConflict,
+		Reason:  "optimistic locking failed",
+		Message: "requested RV does not match saved RV",
+	}
 )

 func NewBadRequestError(msg string) *resourcepb.ErrorResult {

@@ -731,8 +731,12 @@ func (s *server) update(ctx context.Context, user claims.AuthInfo, req *resource
 		return rsp, nil
 	}

+	// TODO: once we know the client is always sending the RV, require ResourceVersion > 0
+	// See: https://github.com/grafana/grafana/pull/111866
 	if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion {
-		return nil, ErrOptimisticLockingFailed
+		return &resourcepb.UpdateResponse{
+			Error: &ErrOptimisticLockingFailed,
+		}, nil
 	}

 	event, e := s.newEvent(ctx, user, req.Key, req.Value, latest.Value)

@@ -796,7 +800,7 @@ func (s *server) delete(ctx context.Context, user claims.AuthInfo, req *resource
 		return rsp, nil
 	}
 	if req.ResourceVersion > 0 && latest.ResourceVersion != req.ResourceVersion {
-		rsp.Error = AsErrorResult(ErrOptimisticLockingFailed)
+		rsp.Error = &ErrOptimisticLockingFailed
 		return rsp, nil
 	}

@@ -477,11 +477,12 @@ func TestSimpleServer(t *testing.T) {
 			ResourceVersion: created.ResourceVersion})
 		require.NoError(t, err)

-		_, err = server.Update(ctx, &resourcepb.UpdateRequest{
+		rsp, _ := server.Update(ctx, &resourcepb.UpdateRequest{
 			Key:             key,
 			Value:           raw,
 			ResourceVersion: created.ResourceVersion})
-		require.ErrorIs(t, err, ErrOptimisticLockingFailed)
+		require.Equal(t, rsp.Error.Code, ErrOptimisticLockingFailed.Code)
+		require.Equal(t, rsp.Error.Message, ErrOptimisticLockingFailed.Message)
 	})
 }

@@ -26,7 +26,6 @@
   "createdBy": "user:be2g71ke8yoe8b",
   "fields": {
     "ds_types": [
-      "datasource",
       "my-custom-plugin"
     ],
     "errors_last_1_days": 1,

@@ -47,12 +46,6 @@
   "kind": "DataSource",
   "name": "DSUID"
 },
-{
-  "relation": "depends-on",
-  "group": "datasource",
-  "kind": "DataSource",
-  "name": "grafana"
-},
 {
   "relation": "depends-on",
   "group": "dashboards.grafana.app",

@@ -18,6 +18,7 @@ import (
 	"go.opentelemetry.io/otel/trace/noop"
 	"google.golang.org/protobuf/proto"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime/schema"

 	"github.com/grafana/grafana/pkg/util/sqlite"

@@ -405,15 +406,18 @@ func (b *backend) update(ctx context.Context, event resource.WriteEvent) (int64,
 	// Use rvManager.ExecWithRV instead of direct transaction
 	rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) {
 		// 1. Update resource
-		_, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
+		res, err := dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
 			SQLTemplate: sqltemplate.New(b.dialect),
-			WriteEvent:  event,
+			WriteEvent:  event, // includes the RV
 			Folder:      folder,
 			GUID:        event.GUID,
 		})
 		if err != nil {
 			return event.GUID, fmt.Errorf("resource update: %w", err)
 		}
+		if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil {
+			return event.GUID, err
+		}

 		// 2. Insert into resource history
 		if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{

@@ -460,7 +464,7 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64,
 	}
 	rv, err := b.rvManager.ExecWithRV(ctx, event.Key, func(tx db.Tx) (string, error) {
 		// 1. delete from resource
-		_, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{
+		res, err := dbutil.Exec(ctx, tx, sqlResourceDelete, sqlResourceRequest{
 			SQLTemplate: sqltemplate.New(b.dialect),
 			WriteEvent:  event,
 			GUID:        event.GUID,

@@ -468,6 +472,9 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64,
 		if err != nil {
 			return event.GUID, fmt.Errorf("delete resource: %w", err)
 		}
+		if err = b.checkConflict(res, event.Key, event.PreviousRV); err != nil {
+			return event.GUID, err
+		}

 		// 2. Add event to resource history
 		if _, err := dbutil.Exec(ctx, tx, sqlResourceHistoryInsert, sqlResourceRequest{

@@ -504,6 +511,28 @@ func (b *backend) delete(ctx context.Context, event resource.WriteEvent) (int64,
 	return rv, nil
 }

+func (b *backend) checkConflict(res db.Result, key *resourcepb.ResourceKey, rv int64) error {
+	if rv == 0 {
+		return nil
+	}
+
+	// The RV is part of the update request, and it may no longer be the most recent
+	rows, err := res.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("unable to verify RV: %w", err)
+	}
+	if rows == 1 {
+		return nil // expected one result
+	}
+	if rows > 0 {
+		return fmt.Errorf("multiple rows affected (%d)", rows)
+	}
+	return apierrors.NewConflict(schema.GroupResource{
+		Group:    key.Group,
+		Resource: key.Resource,
+	}, key.Name, fmt.Errorf("resource version does not match current value"))
+}
+
 func (b *backend) ReadResource(ctx context.Context, req *resourcepb.ReadRequest) *resource.BackendReadResponse {
 	_, span := b.tracer.Start(ctx, tracePrefix+".Read")
 	defer span.End()
@@ -8,7 +8,6 @@ import (
 	"testing"

 	"github.com/DATA-DOG/go-sqlmock"
-
 	"github.com/stretchr/testify/require"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

@@ -5,5 +5,6 @@ DELETE FROM {{ .Ident "resource" }}
   AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }}
 {{ if .WriteEvent.Key.Name }}
   AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }}
+  AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }}
 {{ end }}
 ;

@@ -10,4 +10,5 @@ UPDATE {{ .Ident "resource" }}
   AND {{ .Ident "resource" }} = {{ .Arg .WriteEvent.Key.Resource }}
   AND {{ .Ident "namespace" }} = {{ .Arg .WriteEvent.Key.Namespace }}
   AND {{ .Ident "name" }} = {{ .Arg .WriteEvent.Key.Name }}
+  AND {{ .Ident "resource_version" }} = {{ .Arg .WriteEvent.PreviousRV }}
 ;
@@ -87,14 +87,23 @@ func TestIntegrationListIter(t *testing.T) {
 				Group: item.group,
 				Name:  item.name,
 			},
 			Value: item.value,
-			PreviousRV: 0,
 		},
 	})
 	if err != nil {
 		return fmt.Errorf("failed to insert test data: %w", err)
 	}
-	_, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
+	if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdateRV, sqlResourceUpdateRVRequest{
+		SQLTemplate: sqltemplate.New(dialect),
+		GUIDToRV: map[string]int64{
+			item.guid: item.resourceVersion,
+		},
+	}); err != nil {
+		return fmt.Errorf("failed to insert test data: %w", err)
+	}
+
+	if _, err = dbutil.Exec(ctx, tx, sqlResourceUpdate, sqlResourceRequest{
 		SQLTemplate:     sqltemplate.New(dialect),
 		GUID:            item.guid,
 		ResourceVersion: item.resourceVersion,

@@ -110,8 +119,7 @@ func TestIntegrationListIter(t *testing.T) {
 			PreviousRV: item.resourceVersion,
 			Type:       1,
 		},
-	})
-	if err != nil {
+	}); err != nil {
 		return fmt.Errorf("failed to insert resource version: %w", err)
 	}
 }
@@ -31,6 +31,21 @@ func TestUnifiedStorageQueries(t *testing.T) {
 				},
 			},
 		},
+		{
+			Name: "with rv",
+			Data: &sqlResourceRequest{
+				SQLTemplate: mocks.NewTestingSQLTemplate(),
+				WriteEvent: resource.WriteEvent{
+					Key: &resourcepb.ResourceKey{
+						Namespace: "nn",
+						Group:     "gg",
+						Resource:  "rr",
+						Name:      "name",
+					},
+					PreviousRV: 1234,
+				},
+			},
+		},
 	},
 	sqlResourceInsert: {
 		{

@@ -63,6 +78,7 @@ func TestUnifiedStorageQueries(t *testing.T) {
 					Resource: "rr",
 					Name:     "name",
 				},
+				PreviousRV: 1759304090100678,
 			},
 			Folder: "fldr",
 		},

@@ -263,7 +263,7 @@ func (m *resourceVersionManager) execBatch(ctx context.Context, group, resource
 				attribute.Int("operation_index", i),
 				attribute.String("error", err.Error()),
 			))
-			return fmt.Errorf("failed to execute function: %w", err)
+			return err
 		}
 		guids[i] = guid
 	}
@@ -6,10 +6,12 @@ import (
 	"fmt"
 	"hash/fnv"
 	"net"
+	"net/http"
 	"os"
 	"strconv"
 	"time"

+	"github.com/gorilla/mux"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"go.opentelemetry.io/otel"

@@ -94,6 +96,7 @@ func ProvideUnifiedStorageGrpcService(
 	indexMetrics *resource.BleveIndexMetrics,
 	searchRing *ring.Ring,
 	memberlistKVConfig kv.Config,
+	httpServerRouter *mux.Router,
 ) (UnifiedStorageGrpcService, error) {
 	var err error
 	tracer := otel.Tracer("unified-storage")

@@ -159,6 +162,10 @@ func ProvideUnifiedStorageGrpcService(

 		s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true)
 		subservices = append(subservices, s.ringLifecycler)
+
+		if httpServerRouter != nil {
+			httpServerRouter.Path("/prepare-downscale").Methods("GET", "POST", "DELETE").Handler(http.HandlerFunc(s.PrepareDownscale))
+		}
 	}

 	if cfg.QOSEnabled {

@@ -194,6 +201,21 @@ func ProvideUnifiedStorageGrpcService(
 	return s, nil
 }

+func (s *service) PrepareDownscale(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case http.MethodPost:
+		s.log.Info("Preparing for downscale. Will not keep instance in ring on shutdown.")
+		s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(false)
+	case http.MethodDelete:
+		s.log.Info("Downscale canceled. Will keep instance in ring on shutdown.")
+		s.ringLifecycler.SetKeepInstanceInTheRingOnShutdown(true)
+	case http.MethodGet:
+		// used for the delayed downscale use case, which we don't support. Left here for completeness.
+		s.log.Info("Received GET request for prepare-downscale. Behavior not implemented.")
+	default:
+	}
+}
+
 var (
 	// operation used by the search-servers to check if they own the namespace
 	searchOwnerRead = ring.NewOp([]ring.InstanceState{ring.JOINING, ring.ACTIVE, ring.LEAVING}, nil)

@@ -128,7 +128,7 @@ func TestClientServer(t *testing.T) {

 	features := featuremgmt.WithFeatures()

-	svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{})
+	svc, err := sql.ProvideUnifiedStorageGrpcService(cfg, features, dbstore, nil, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil)
 	require.NoError(t, err)
 	var client resourcepb.ResourceStoreClient

@@ -4,4 +4,5 @@ DELETE FROM `resource`
   AND `group` = 'gg'
   AND `resource` = 'rr'
   AND `name` = 'name'
+  AND `resource_version` = 0
 ;

@@ -0,0 +1,8 @@
+DELETE FROM `resource`
+  WHERE 1 = 1
+  AND `namespace` = 'nn'
+  AND `group` = 'gg'
+  AND `resource` = 'rr'
+  AND `name` = 'name'
+  AND `resource_version` = 1234
+;

@@ -10,4 +10,5 @@ UPDATE `resource`
   AND `resource` = 'rr'
   AND `namespace` = 'nn'
   AND `name` = 'name'
+  AND `resource_version` = 1759304090100678
 ;

@@ -4,4 +4,5 @@ DELETE FROM "resource"
   AND "group" = 'gg'
   AND "resource" = 'rr'
   AND "name" = 'name'
+  AND "resource_version" = 0
 ;

@@ -0,0 +1,8 @@
+DELETE FROM "resource"
+  WHERE 1 = 1
+  AND "namespace" = 'nn'
+  AND "group" = 'gg'
+  AND "resource" = 'rr'
+  AND "name" = 'name'
+  AND "resource_version" = 1234
+;

@@ -10,4 +10,5 @@ UPDATE "resource"
   AND "resource" = 'rr'
   AND "namespace" = 'nn'
   AND "name" = 'name'
+  AND "resource_version" = 1759304090100678
 ;

@@ -4,4 +4,5 @@ DELETE FROM "resource"
   AND "group" = 'gg'
   AND "resource" = 'rr'
   AND "name" = 'name'
+  AND "resource_version" = 0
 ;

@@ -0,0 +1,8 @@
+DELETE FROM "resource"
+  WHERE 1 = 1
+  AND "namespace" = 'nn'
+  AND "group" = 'gg'
+  AND "resource" = 'rr'
+  AND "name" = 'name'
+  AND "resource_version" = 1234
+;

@@ -10,4 +10,5 @@ UPDATE "resource"
   AND "resource" = 'rr'
   AND "namespace" = 'nn'
   AND "name" = 'name'
+  AND "resource_version" = 1759304090100678
 ;
@@ -124,13 +124,13 @@ func runTestIntegrationBackendHappyPath(t *testing.T, backend resource.StorageBa
 	})

 	t.Run("Update item2", func(t *testing.T) {
-		rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+		rv4, err = writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2))
 		require.NoError(t, err)
 		require.Greater(t, rv4, rv3)
 	})

 	t.Run("Delete item1", func(t *testing.T) {
-		rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
+		rv5, err = writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1))
 		require.NoError(t, err)
 		require.Greater(t, rv5, rv4)
 	})

@@ -352,10 +352,10 @@ func runTestIntegrationBackendList(t *testing.T, backend resource.StorageBackend
 	rv5, err := writeEvent(ctx, backend, "item5", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
 	require.NoError(t, err)
 	require.Greater(t, rv5, rv4)
-	rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rv6, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv2))
 	require.NoError(t, err)
 	require.Greater(t, rv6, rv5)
-	rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
+	rv7, err := writeEvent(ctx, backend, "item3", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv3))
 	require.NoError(t, err)
 	require.Greater(t, rv7, rv6)
 	rv8, err := writeEvent(ctx, backend, "item6", resourcepb.WatchEvent_ADDED, WithNamespace(ns))

@@ -490,10 +490,10 @@ func runTestIntegrationBackendListModifiedSince(t *testing.T, backend resource.S
 	ns := nsPrefix + "-history-ns"
 	rvCreated, _ := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
 	require.Greater(t, rvCreated, int64(0))
-	rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rvUpdated, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvCreated))
 	require.NoError(t, err)
 	require.Greater(t, rvUpdated, rvCreated)
-	rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
+	rvDeleted, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rvUpdated))
 	require.NoError(t, err)
 	require.Greater(t, rvDeleted, rvUpdated)

@@ -610,19 +610,19 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
 	require.Greater(t, rv1, int64(0))

 	// add 5 events for item1 - should be saved to history
-	rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rvHistory1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1))
 	require.NoError(t, err)
 	require.Greater(t, rvHistory1, rv1)
-	rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rvHistory2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory1))
 	require.NoError(t, err)
 	require.Greater(t, rvHistory2, rvHistory1)
-	rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rvHistory3, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory2))
 	require.NoError(t, err)
 	require.Greater(t, rvHistory3, rvHistory2)
-	rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rvHistory4, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory3))
 	require.NoError(t, err)
 	require.Greater(t, rvHistory4, rvHistory3)
-	rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rvHistory5, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rvHistory4))
 	require.NoError(t, err)
 	require.Greater(t, rvHistory5, rvHistory4)

@@ -804,8 +804,9 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
 	resourceVersions = append(resourceVersions, initialRV)

 	// Create 9 more versions with modifications
+	rv := initialRV
 	for i := 0; i < 9; i++ {
-		rv, err := writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns2))
+		rv, err = writeEvent(ctx, backend, "paged-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns2, rv))
 		require.NoError(t, err)
 		resourceVersions = append(resourceVersions, rv)
 	}

@@ -907,7 +908,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
 	// Create a resource and delete it
 	rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
 	require.NoError(t, err)
-	rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
+	rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv))
 	require.NoError(t, err)
 	require.Greater(t, rvDeleted, rv)

@@ -932,7 +933,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
 	// Create a resource and delete it
 	rv, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
 	require.NoError(t, err)
-	rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
+	rvDeleted, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv))
 	require.NoError(t, err)
 	require.Greater(t, rvDeleted, rv)

@@ -940,7 +941,7 @@ func runTestIntegrationBackendListHistory(t *testing.T, backend resource.Storage
 	rv1, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
 	require.NoError(t, err)
 	require.Greater(t, rv1, rvDeleted)
-	rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespace(ns))
+	rv2, err := writeEvent(ctx, backend, "deleted-item", resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, rv1))
 	require.NoError(t, err)
 	require.Greater(t, rv2, rv1)

@@ -983,8 +984,8 @@ func runTestIntegrationBackendListHistoryErrorReporting(t *testing.T, backend re

 	const events = 500
 	prevRv := origRv
-	for i := 0; i < events; i++ {
-		rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespace(ns), WithGroup(group), WithResource(resourceName))
+	for range events {
+		rv, err := writeEvent(ctx, backend, name, resourcepb.WatchEvent_MODIFIED, WithNamespaceAndRV(ns, prevRv), WithGroup(group), WithResource(resourceName))
 		require.NoError(t, err)
 		require.Greater(t, rv, prevRv)
 		prevRv = rv
|
@ -1131,6 +1132,14 @@ func runTestIntegrationBackendCreateNewResource(t *testing.T, backend resource.S
|
||||||
// WriteEventOption is a function that modifies WriteEventOptions
|
// WriteEventOption is a function that modifies WriteEventOptions
|
||||||
type WriteEventOption func(*WriteEventOptions)
|
type WriteEventOption func(*WriteEventOptions)
|
||||||
|
|
||||||
|
// WithNamespace sets the namespace for the write event
|
||||||
|
func WithNamespaceAndRV(namespace string, rv int64) WriteEventOption {
|
||||||
|
return func(o *WriteEventOptions) {
|
||||||
|
o.Namespace = namespace
|
||||||
|
o.PreviousRV = rv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithNamespace sets the namespace for the write event
|
// WithNamespace sets the namespace for the write event
|
||||||
func WithNamespace(namespace string) WriteEventOption {
|
func WithNamespace(namespace string) WriteEventOption {
|
||||||
return func(o *WriteEventOptions) {
|
return func(o *WriteEventOptions) {
|
||||||
|
@ -1180,11 +1189,12 @@ func WithValue(value string) WriteEventOption {
|
||||||
}
|
}
|
||||||
|
|
||||||
type WriteEventOptions struct {
|
type WriteEventOptions struct {
|
||||||
Namespace string
|
Namespace string
|
||||||
Group string
|
Group string
|
||||||
Resource string
|
Resource string
|
||||||
Folder string
|
Folder string
|
||||||
Value []byte
|
Value []byte
|
||||||
|
PreviousRV int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeEvent(ctx context.Context, store resource.StorageBackend, name string, action resourcepb.WatchEvent_Type, opts ...WriteEventOption) (int64, error) {
|
func writeEvent(ctx context.Context, store resource.StorageBackend, name string, action resourcepb.WatchEvent_Type, opts ...WriteEventOption) (int64, error) {
|
||||||
|
@ -1236,6 +1246,7 @@ func writeEvent(ctx context.Context, store resource.StorageBackend, name string,
|
||||||
Resource: options.Resource,
|
Resource: options.Resource,
|
||||||
Name: name,
|
Name: name,
|
||||||
},
|
},
|
||||||
|
PreviousRV: options.PreviousRV,
|
||||||
}
|
}
|
||||||
switch action {
|
switch action {
|
||||||
case resourcepb.WatchEvent_DELETED:
|
case resourcepb.WatchEvent_DELETED:
|
||||||
|
@ -1285,18 +1296,15 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken
|
||||||
rv1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
|
rv1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Greater(t, rv1, int64(0))
|
require.Greater(t, rv1, int64(0))
|
||||||
rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
|
rvDelete1, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv1))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Greater(t, rvDelete1, rv1)
|
require.Greater(t, rvDelete1, rv1)
|
||||||
rvDelete2, err := writeEvent(ctx, backend, "item1", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Greater(t, rvDelete2, rvDelete1)
|
|
||||||
|
|
||||||
// item2 deleted and recreated, should not be returned in trash
|
// item2 deleted and recreated, should not be returned in trash
|
||||||
rv2, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
|
rv2, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Greater(t, rv2, int64(0))
|
require.Greater(t, rv2, int64(0))
|
||||||
rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespace(ns))
|
rvDelete3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_DELETED, WithNamespaceAndRV(ns, rv2))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Greater(t, rvDelete3, rv2)
|
require.Greater(t, rvDelete3, rv2)
|
||||||
rv3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
|
rv3, err := writeEvent(ctx, backend, "item2", resourcepb.WatchEvent_ADDED, WithNamespace(ns))
|
||||||
|
@ -1325,10 +1333,10 @@ func runTestIntegrationBackendTrash(t *testing.T, backend resource.StorageBacken
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
expectedVersions: []int64{rvDelete2},
|
expectedVersions: []int64{rvDelete1},
|
||||||
expectedValues: []string{"item1 DELETED"},
|
expectedValues: []string{"item1 DELETED"},
|
||||||
minExpectedHeadRV: rvDelete2,
|
minExpectedHeadRV: rvDelete1,
|
||||||
expectedContinueRV: rvDelete2,
|
expectedContinueRV: rvDelete1,
|
||||||
expectedSortAsc: false,
|
expectedSortAsc: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
|
@@ -209,7 +209,7 @@
       "path": "public/plugins/grafana-azure-monitor-datasource/img/azure_monitor_cpu.png"
     }
   ],
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": [
     "azure",

@@ -880,7 +880,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -934,7 +934,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": [
     "grafana",

@@ -1000,7 +1000,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -1217,7 +1217,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -1325,7 +1325,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -1375,7 +1375,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -1425,7 +1425,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -1629,7 +1629,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": [
     "grafana",

@@ -1734,12 +1734,12 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },
 "dependencies": {
-  "grafanaDependency": "",
+  "grafanaDependency": "\u003e=11.6.0",
   "grafanaVersion": "*",
   "plugins": [],
   "extensions": {

@@ -2042,7 +2042,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -2092,7 +2092,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },

@@ -2445,7 +2445,7 @@
   },
   "build": {},
   "screenshots": null,
-  "version": "12.2.0-pre",
+  "version": "12.3.0-pre",
   "updated": "",
   "keywords": null
 },
@@ -129,7 +129,7 @@ func StartGrafanaEnv(t *testing.T, grafDir, cfgPath string) (string, *server.Tes
 	var storage sql.UnifiedStorageGrpcService
 	if runstore {
 		storage, err = sql.ProvideUnifiedStorageGrpcService(env.Cfg, env.FeatureToggles, env.SQLStore,
-			env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{})
+			env.Cfg.Logger, prometheus.NewPedanticRegistry(), nil, nil, nil, nil, kv.Config{}, nil)
 		require.NoError(t, err)
 		ctx := context.Background()
 		err = storage.StartAsync(ctx)
@ -0,0 +1,117 @@
|
||||||
|
package pgx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||||
|
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
|
||||||
|
"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
|
||||||
|
err := e.Ping(ctx)
|
||||||
|
if err != nil {
|
||||||
|
logCheckHealthError(ctx, e.dsInfo, err)
|
||||||
|
if strings.EqualFold(req.PluginContext.User.Role, "Admin") {
|
||||||
|
return ErrToHealthCheckResult(err)
|
||||||
|
}
|
||||||
|
errResponse := &backend.CheckHealthResult{
|
||||||
|
Status: backend.HealthStatusError,
|
||||||
|
Message: e.TransformQueryError(e.log, err).Error(),
|
||||||
|
}
|
||||||
|
return errResponse, nil
|
||||||
|
}
|
||||||
|
return &backend.CheckHealthResult{Status: backend.HealthStatusOk, Message: "Database Connection OK"}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrToHealthCheckResult converts error into user friendly health check message
|
||||||
|
// This should be called with non nil error. If the err parameter is empty, we will send Internal Server Error
|
||||||
|
func ErrToHealthCheckResult(err error) (*backend.CheckHealthResult, error) {
|
||||||
|
if err == nil {
|
||||||
|
return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"}, nil
|
||||||
|
}
|
||||||
|
res := &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: err.Error()}
|
||||||
|
details := map[string]string{
|
||||||
|
"verboseMessage": err.Error(),
|
||||||
|
"errorDetailsLink": "https://grafana.com/docs/grafana/latest/datasources/postgres",
|
||||||
|
}
|
||||||
|
var opErr *net.OpError
|
||||||
|
if errors.As(err, &opErr) {
|
||||||
|
res.Message = "Network error: Failed to connect to the server"
|
||||||
|
if opErr != nil && opErr.Err != nil {
|
||||||
|
errMessage := opErr.Err.Error()
|
||||||
|
if strings.HasSuffix(opErr.Err.Error(), "no such host") {
|
||||||
|
errMessage = "no such host"
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(opErr.Err.Error(), "unknown port") {
|
||||||
|
errMessage = "unknown port"
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(opErr.Err.Error(), "invalid port") {
|
||||||
|
errMessage = "invalid port"
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(opErr.Err.Error(), "missing port in address") {
|
||||||
|
errMessage = "missing port in address"
|
||||||
|
}
|
||||||
|
if strings.HasSuffix(opErr.Err.Error(), "invalid syntax") {
|
||||||
|
errMessage = "invalid syntax found in the address"
|
||||||
|
}
|
||||||
|
res.Message += fmt.Sprintf(". Error message: %s", errMessage)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, sqleng.ErrParsingPostgresURL) {
|
||||||
|
res.Message = fmt.Sprintf("Connection string error: %s", sqleng.ErrParsingPostgresURL.Error())
|
||||||
|
if unwrappedErr := errors.Unwrap(err); unwrappedErr != nil {
|
||||||
|
details["verboseMessage"] = unwrappedErr.Error()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
detailBytes, marshalErr := json.Marshal(details)
|
||||||
|
if marshalErr != nil {
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
res.JSONDetails = detailBytes
|
||||||
|
return res, nil
|
||||||
|
}

func logCheckHealthError(ctx context.Context, dsInfo sqleng.DataSourceInfo, err error) {
	logger := log.DefaultLogger.FromContext(ctx)
	configSummary := map[string]any{
		"config_url_length":                 len(dsInfo.URL),
		"config_user_length":                len(dsInfo.User),
		"config_database_length":            len(dsInfo.Database),
		"config_json_data_database_length":  len(dsInfo.JsonData.Database),
		"config_max_open_conns":             dsInfo.JsonData.MaxOpenConns,
		"config_max_idle_conns":             dsInfo.JsonData.MaxIdleConns,
		"config_conn_max_life_time":         dsInfo.JsonData.ConnMaxLifetime,
		"config_conn_timeout":               dsInfo.JsonData.ConnectionTimeout,
		"config_timescaledb":                dsInfo.JsonData.Timescaledb,
		"config_ssl_mode":                   dsInfo.JsonData.Mode,
		"config_tls_configuration_method":   dsInfo.JsonData.ConfigurationMethod,
		"config_tls_skip_verify":            dsInfo.JsonData.TlsSkipVerify,
		"config_timezone":                   dsInfo.JsonData.Timezone,
		"config_time_interval":              dsInfo.JsonData.TimeInterval,
		"config_enable_secure_proxy":        dsInfo.JsonData.SecureDSProxy,
		"config_allow_clear_text_passwords": dsInfo.JsonData.AllowCleartextPasswords,
		"config_authentication_type":        dsInfo.JsonData.AuthenticationType,
		"config_ssl_root_cert_file_length":  len(dsInfo.JsonData.RootCertFile),
		"config_ssl_cert_file_length":       len(dsInfo.JsonData.CertFile),
		"config_ssl_key_file_length":        len(dsInfo.JsonData.CertKeyFile),
		"config_encrypt_length":             len(dsInfo.JsonData.Encrypt),
		"config_server_name_length":         len(dsInfo.JsonData.Servername),
		"config_password_length":            len(dsInfo.DecryptedSecureJSONData["password"]),
		"config_tls_ca_cert_length":         len(dsInfo.DecryptedSecureJSONData["tlsCACert"]),
		"config_tls_client_cert_length":     len(dsInfo.DecryptedSecureJSONData["tlsClientCert"]),
		"config_tls_client_key_length":      len(dsInfo.DecryptedSecureJSONData["tlsClientKey"]),
	}
	configSummaryJSON, marshalError := json.Marshal(configSummary)
	if marshalError != nil {
		logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error")
		return
	}
	logger.Error("Check health failed", "error", err, "message_type", "ds_config_health_check_error_detailed", "details", string(configSummaryJSON))
}
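
// Note that the summary above logs only the lengths of user-supplied values
// (URL, user, certificates, password), never the values themselves, so this
// error path cannot leak credentials into the logs. A minimal sketch of the
// same idea, with a hypothetical secret:
//
//	password := dsInfo.DecryptedSecureJSONData["password"]
//	logger.Error("Check health failed", "config_password_length", len(password)) // logs a length, not the secret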

@ -0,0 +1,61 @@
package pgx

import (
	"errors"
	"fmt"
	"net"
	"testing"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestErrToHealthCheckResult(t *testing.T) {
	tests := []struct {
		name string
		err  error
		want *backend.CheckHealthResult
	}{
		{
			name: "without error",
			want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "Internal Server Error"},
		},
		{
			name: "network error",
			err:  errors.Join(errors.New("foo"), &net.OpError{Op: "read", Net: "tcp", Err: errors.New("some op")}),
			want: &backend.CheckHealthResult{
				Status:      backend.HealthStatusError,
				Message:     "Network error: Failed to connect to the server. Error message: some op",
				JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"foo\nread tcp: some op"}`),
			},
		},
		{
			name: "regular error",
			err:  errors.New("internal server error"),
			want: &backend.CheckHealthResult{
				Status:      backend.HealthStatusError,
				Message:     "internal server error",
				JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"internal server error"}`),
			},
		},
		{
			name: "invalid port specifier error",
			err:  fmt.Errorf("%w %q: %w", sqleng.ErrParsingPostgresURL, `"foo.bar.co"`, errors.New(`strconv.Atoi: parsing "foo.bar.co": invalid syntax`)),
			want: &backend.CheckHealthResult{
				Status:      backend.HealthStatusError,
				Message:     "Connection string error: error parsing postgres url",
				JSONDetails: []byte(`{"errorDetailsLink":"https://grafana.com/docs/grafana/latest/datasources/postgres","verboseMessage":"error parsing postgres url \"\\\"foo.bar.co\\\"\": strconv.Atoi: parsing \"foo.bar.co\": invalid syntax"}`),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := ErrToHealthCheckResult(tt.err)
			require.Nil(t, err)
			assert.Equal(t, string(tt.want.JSONDetails), string(got.JSONDetails))
			require.Equal(t, tt.want, got)
		})
	}
}

@ -1,25 +1,109 @@
-package sqleng
+package pgx

 import (
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
+	"net"
 	"runtime/debug"
+	"strconv"
 	"strings"
 	"sync"
 	"time"

 	"github.com/grafana/grafana-plugin-sdk-go/backend"
+	"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
 	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
 	"github.com/grafana/grafana-plugin-sdk-go/data"
 	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
+	"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
 	"github.com/jackc/pgx/v5/pgconn"
 	"github.com/jackc/pgx/v5/pgtype"
 	"github.com/jackc/pgx/v5/pgxpool"
 )

-func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer,
+// MetaKeyExecutedQueryString is the key where the executed query should get stored
+const MetaKeyExecutedQueryString = "executedQueryString"
+
+// SQLMacroEngine interpolates macros into sql. It takes in the Query to have access to the query context and
+// the timeRange to be able to generate queries that use from and to.
+type SQLMacroEngine interface {
+	Interpolate(query *backend.DataQuery, timeRange backend.TimeRange, sql string) (string, error)
+}
+
+// SqlQueryResultTransformer transforms a query result row to RowValues with proper types.
+type SqlQueryResultTransformer interface {
+	// TransformQueryError transforms a query error.
+	TransformQueryError(logger log.Logger, err error) error
+	GetConverterList() []sqlutil.StringConverter
+}
+
+type JsonData struct {
+	MaxOpenConns            int    `json:"maxOpenConns"`
+	MaxIdleConns            int    `json:"maxIdleConns"`
+	ConnMaxLifetime         int    `json:"connMaxLifetime"`
+	ConnectionTimeout       int    `json:"connectionTimeout"`
+	Timescaledb             bool   `json:"timescaledb"`
+	Mode                    string `json:"sslmode"`
+	ConfigurationMethod     string `json:"tlsConfigurationMethod"`
+	TlsSkipVerify           bool   `json:"tlsSkipVerify"`
+	RootCertFile            string `json:"sslRootCertFile"`
+	CertFile                string `json:"sslCertFile"`
+	CertKeyFile             string `json:"sslKeyFile"`
+	Timezone                string `json:"timezone"`
+	Encrypt                 string `json:"encrypt"`
+	Servername              string `json:"servername"`
+	TimeInterval            string `json:"timeInterval"`
+	Database                string `json:"database"`
+	SecureDSProxy           bool   `json:"enableSecureSocksProxy"`
+	SecureDSProxyUsername   string `json:"secureSocksProxyUsername"`
+	AllowCleartextPasswords bool   `json:"allowCleartextPasswords"`
+	AuthenticationType      string `json:"authenticationType"`
+}
+
+type DataPluginConfiguration struct {
+	DSInfo            sqleng.DataSourceInfo
+	TimeColumnNames   []string
+	MetricColumnTypes []string
+	RowLimit          int64
+}
+
+type DataSourceHandler struct {
+	macroEngine            SQLMacroEngine
+	queryResultTransformer SqlQueryResultTransformer
+	timeColumnNames        []string
+	metricColumnTypes      []string
+	log                    log.Logger
+	dsInfo                 sqleng.DataSourceInfo
+	rowLimit               int64
+	userError              string
+	pool                   *pgxpool.Pool
+}
+
+type QueryJson struct {
+	RawSql       string  `json:"rawSql"`
+	Fill         bool    `json:"fill"`
+	FillInterval float64 `json:"fillInterval"`
+	FillMode     string  `json:"fillMode"`
+	FillValue    float64 `json:"fillValue"`
+	Format       string  `json:"format"`
+}
+
+func (e *DataSourceHandler) TransformQueryError(logger log.Logger, err error) error {
+	// OpError is the error type usually returned by functions in the net
+	// package. It describes the operation, network type, and address of
+	// an error. We log this error rather than return it to the client
+	// for security purposes.
+	var opErr *net.OpError
+	if errors.As(err, &opErr) {
+		return fmt.Errorf("failed to connect to server - %s", e.userError)
+	}
+
+	return e.queryResultTransformer.TransformQueryError(logger, err)
+}
+
+func NewQueryDataHandler(userFacingDefaultError string, p *pgxpool.Pool, config DataPluginConfiguration, queryResultTransformer SqlQueryResultTransformer,
 	macroEngine SQLMacroEngine, log log.Logger) (*DataSourceHandler, error) {
 	queryDataHandler := DataSourceHandler{
 		queryResultTransformer: queryResultTransformer,

@ -43,7 +127,12 @@ func NewQueryDataHandlerPGX(userFacingDefaultError string, p *pgxpool.Pool, conf
 	return &queryDataHandler, nil
 }

-func (e *DataSourceHandler) DisposePGX() {
+type DBDataResponse struct {
+	dataResponse backend.DataResponse
+	refID        string
+}
+
+func (e *DataSourceHandler) Dispose() {
 	e.log.Debug("Disposing DB...")

 	if e.pool != nil {

@ -53,11 +142,11 @@ func (e *DataSourceHandler) DisposePGX() {
 	e.log.Debug("DB disposed")
 }

-func (e *DataSourceHandler) PingPGX(ctx context.Context) error {
+func (e *DataSourceHandler) Ping(ctx context.Context) error {
 	return e.pool.Ping(ctx)
 }

-func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
+func (e *DataSourceHandler) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
 	result := backend.NewQueryDataResponse()
 	ch := make(chan DBDataResponse, len(req.Queries))
 	var wg sync.WaitGroup

@ -83,7 +172,7 @@ func (e *DataSourceHandler) QueryDataPGX(ctx context.Context, req *backend.Query
 		}

 		wg.Add(1)
-		go e.executeQueryPGX(ctx, query, &wg, ch, queryjson)
+		go e.executeQuery(ctx, query, &wg, ch, queryjson)
 	}

 	wg.Wait()

@ -101,7 +190,7 @@
 func (e *DataSourceHandler) handleQueryError(frameErr string, err error, query string, source backend.ErrorSource, ch chan DBDataResponse, queryResult DBDataResponse) {
 	var emptyFrame data.Frame
 	emptyFrame.SetMeta(&data.FrameMeta{ExecutedQueryString: query})
-	if backend.IsDownstreamError(err) {
+	if isDownstreamError(err) {
 		source = backend.ErrorSourceDownstream
 	}
 	queryResult.dataResponse.Error = fmt.Errorf("%s: %w", frameErr, err)

@ -127,6 +216,18 @@ func (e *DataSourceHandler) handlePanic(logger log.Logger, queryResult *DBDataRe
 	}
 }

+// Interpolate provides global macros/substitutions for all sql datasources.
+var Interpolate = func(query backend.DataQuery, timeRange backend.TimeRange, timeInterval string, sql string) string {
+	interval := query.Interval
+
+	sql = strings.ReplaceAll(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10))
+	sql = strings.ReplaceAll(sql, "$__interval", gtime.FormatInterval(interval))
+	sql = strings.ReplaceAll(sql, "$__unixEpochFrom()", fmt.Sprintf("%d", timeRange.From.UTC().Unix()))
+	sql = strings.ReplaceAll(sql, "$__unixEpochTo()", fmt.Sprintf("%d", timeRange.To.UTC().Unix()))
+
+	return sql
+}
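
// A short sketch of what Interpolate produces, assuming a 5m query interval
// and the hypothetical time range below (note $__interval_ms is replaced
// before $__interval, so the shorter macro cannot clobber the longer one):
//
//	q := backend.DataQuery{Interval: 5 * time.Minute}
//	tr := backend.TimeRange{From: time.Unix(1523556000, 0), To: time.Unix(1523556300, 0)}
//	Interpolate(q, tr, "", "WHERE t > $__unixEpochFrom() GROUP BY $__interval")
//	// => "WHERE t > 1523556000 GROUP BY 5m"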

 func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgconn.Result, error) {
 	c, err := e.pool.Acquire(ctx)
 	if err != nil {

@ -140,7 +241,7 @@ func (e *DataSourceHandler) execQuery(ctx context.Context, query string) ([]*pgc
 	return mrr.ReadAll()
 }

-func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup,
+func (e *DataSourceHandler) executeQuery(queryContext context.Context, query backend.DataQuery, wg *sync.WaitGroup,
 	ch chan DBDataResponse, queryJSON QueryJson) {
 	defer wg.Done()
 	queryResult := DBDataResponse{

@ -171,7 +272,7 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query
 		return
 	}

-	qm, err := e.newProcessCfgPGX(queryContext, query, results, interpolatedQuery)
+	qm, err := e.newProcessCfg(queryContext, query, results, interpolatedQuery)
 	if err != nil {
 		e.handleQueryError("failed to get configurations", err, interpolatedQuery, backend.ErrorSourceDownstream, ch, queryResult)
 		return

@ -186,6 +287,47 @@ func (e *DataSourceHandler) executeQueryPGX(queryContext context.Context, query
 	e.processFrame(frame, qm, queryResult, ch, logger)
 }

+// dataQueryFormat is the type of query.
+type dataQueryFormat string
+
+const (
+	// dataQueryFormatTable identifies a table query (default).
+	dataQueryFormatTable dataQueryFormat = "table"
+	// dataQueryFormatSeries identifies a time series query.
+	dataQueryFormatSeries dataQueryFormat = "time_series"
+)
+
+type dataQueryModel struct {
+	InterpolatedQuery string // property not set until after Interpolate()
+	Format            dataQueryFormat
+	TimeRange         backend.TimeRange
+	FillMissing       *data.FillMissing // property not set until after Interpolate()
+	Interval          time.Duration
+	columnNames       []string
+	columnTypes       []string
+	timeIndex         int
+	timeEndIndex      int
+	metricIndex       int
+	metricPrefix      bool
+	queryContext      context.Context
+}
+
+func convertSQLTimeColumnsToEpochMS(frame *data.Frame, qm *dataQueryModel) error {
+	if qm.timeIndex != -1 {
+		if err := convertSQLTimeColumnToEpochMS(frame, qm.timeIndex); err != nil {
+			return fmt.Errorf("%v: %w", "failed to convert time column", err)
+		}
+	}
+
+	if qm.timeEndIndex != -1 {
+		if err := convertSQLTimeColumnToEpochMS(frame, qm.timeEndIndex); err != nil {
+			return fmt.Errorf("%v: %w", "failed to convert timeend column", err)
+		}
+	}
+
+	return nil
+}
+
 func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel, queryResult DBDataResponse, ch chan DBDataResponse, logger log.Logger) {
 	if frame.Meta == nil {
 		frame.Meta = &data.FrameMeta{}

@ -281,10 +423,10 @@ func (e *DataSourceHandler) processFrame(frame *data.Frame, qm *dataQueryModel,
 	ch <- queryResult
 }

-func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query backend.DataQuery,
+func (e *DataSourceHandler) newProcessCfg(queryContext context.Context, query backend.DataQuery,
 	results []*pgconn.Result, interpolatedQuery string) (*dataQueryModel, error) {
 	columnNames := []string{}
-	columnTypesPGX := []string{}
+	columnTypes := []string{}

 	// The results will contain column information in the metadata
 	for _, result := range results {

@ -296,26 +438,26 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query
 			// Handle special cases for field types
 			switch field.DataTypeOID {
 			case pgtype.TimetzOID:
-				columnTypesPGX = append(columnTypesPGX, "timetz")
+				columnTypes = append(columnTypes, "timetz")
 			case 790:
-				columnTypesPGX = append(columnTypesPGX, "money")
+				columnTypes = append(columnTypes, "money")
 			default:
-				columnTypesPGX = append(columnTypesPGX, "unknown")
+				columnTypes = append(columnTypes, "unknown")
 			}
 		} else {
-			columnTypesPGX = append(columnTypesPGX, pqtype.Name)
+			columnTypes = append(columnTypes, pqtype.Name)
 		}
 	}
 	}

 	qm := &dataQueryModel{
-		columnTypesPGX: columnTypesPGX,
+		columnTypes:  columnTypes,
 		columnNames:  columnNames,
 		timeIndex:    -1,
 		timeEndIndex: -1,
 		metricIndex:  -1,
 		metricPrefix: false,
 		queryContext: queryContext,
 	}

 	queryJSON := QueryJson{}

@ -370,7 +512,7 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query
 				qm.metricIndex = i
 			default:
 				if qm.metricIndex == -1 {
-					columnType := qm.columnTypesPGX[i]
+					columnType := qm.columnTypes[i]
 					for _, mct := range e.metricColumnTypes {
 						if columnType == mct {
 							qm.metricIndex = i

@ -596,3 +738,99 @@ func getFieldTypesFromDescriptions(fieldDescriptions []pgconn.FieldDescription,
 	}
 	return fieldTypes, nil
 }
+
+// convertSQLTimeColumnToEpochMS converts a column named time to a unix timestamp in milliseconds
+// to make native datetime types and epoch dates work in annotation and table queries.
+func convertSQLTimeColumnToEpochMS(frame *data.Frame, timeIndex int) error {
+	if timeIndex < 0 || timeIndex >= len(frame.Fields) {
+		return fmt.Errorf("timeIndex %d is out of range", timeIndex)
+	}
+
+	origin := frame.Fields[timeIndex]
+	valueType := origin.Type()
+	if valueType == data.FieldTypeTime || valueType == data.FieldTypeNullableTime {
+		return nil
+	}
+
+	newField := data.NewFieldFromFieldType(data.FieldTypeNullableTime, 0)
+	newField.Name = origin.Name
+	newField.Labels = origin.Labels
+
+	valueLength := origin.Len()
+	for i := 0; i < valueLength; i++ {
+		v, err := origin.NullableFloatAt(i)
+		if err != nil {
+			return fmt.Errorf("unable to convert data to a time field")
+		}
+		if v == nil {
+			newField.Append(nil)
+		} else {
+			timestamp := time.Unix(0, int64(epochPrecisionToMS(*v))*int64(time.Millisecond))
+			newField.Append(&timestamp)
+		}
+	}
+	frame.Fields[timeIndex] = newField
+
+	return nil
+}
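
// A minimal sketch of the conversion, assuming a frame whose first field
// holds epoch seconds as int64:
//
//	f := data.NewFrame("", data.NewField("time", nil, []int64{1523556000}))
//	_ = convertSQLTimeColumnToEpochMS(f, 0)
//	// f.Fields[0] is now a nullable time field holding 2018-04-12 18:00:00 UTC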
+
+// convertSQLValueColumnToFloat converts a timeseries value column to float.
+func convertSQLValueColumnToFloat(frame *data.Frame, Index int) (*data.Frame, error) {
+	if Index < 0 || Index >= len(frame.Fields) {
+		return frame, fmt.Errorf("metricIndex %d is out of range", Index)
+	}
+
+	origin := frame.Fields[Index]
+	valueType := origin.Type()
+	if valueType == data.FieldTypeFloat64 || valueType == data.FieldTypeNullableFloat64 {
+		return frame, nil
+	}
+
+	newField := data.NewFieldFromFieldType(data.FieldTypeNullableFloat64, origin.Len())
+	newField.Name = origin.Name
+	newField.Labels = origin.Labels
+
+	for i := 0; i < origin.Len(); i++ {
+		v, err := origin.NullableFloatAt(i)
+		if err != nil {
+			return frame, err
+		}
+		newField.Set(i, v)
+	}
+
+	frame.Fields[Index] = newField
+
+	return frame, nil
+}
+
+// epochPrecisionToMS converts epoch precision to milliseconds, if needed.
+// Only second and nanosecond precision are detected and converted right now.
+func epochPrecisionToMS(value float64) float64 {
+	s := strconv.FormatFloat(value, 'e', -1, 64)
+	if strings.HasSuffix(s, "e+09") {
+		return value * float64(1e3)
+	}
+
+	if strings.HasSuffix(s, "e+18") {
+		return value / float64(time.Millisecond)
+	}
+
+	return value
+}
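
// Worked examples of the magnitude heuristic above:
//
//	epochPrecisionToMS(1.5e9)  // seconds     -> 1.5e12 (multiplied by 1e3)
//	epochPrecisionToMS(1.5e18) // nanoseconds -> 1.5e12 (divided by 1e6)
//	epochPrecisionToMS(1.5e12) // already ms  -> 1.5e12 (unchanged)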
+
+func isDownstreamError(err error) bool {
+	if backend.IsDownstreamError(err) {
+		return true
+	}
+	resultProcessingDownstreamErrors := []error{
+		data.ErrorInputFieldsWithoutRows,
+		data.ErrorSeriesUnsorted,
+		data.ErrorNullTimeValues,
+	}
+	for _, e := range resultProcessingDownstreamErrors {
+		if errors.Is(err, e) {
+			return true
+		}
+	}
+	return false
+}
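
// A small sketch of the classification this helper feeds (the wrapped error
// is hypothetical): frame-conversion failures caused by the returned data are
// attributed to the datasource, not to the plugin.
//
//	err := fmt.Errorf("convert frame from rows: %w", data.ErrorSeriesUnsorted)
//	isDownstreamError(err) // true, so the query error is marked ErrorSourceDownstream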

@ -0,0 +1,681 @@
package pgx

import (
	"fmt"
	"net"
	"testing"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/data"
	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
	"github.com/jackc/pgx/v5/pgconn"
	"github.com/jackc/pgx/v5/pgtype"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
)

func Pointer[T any](v T) *T { return &v }

func TestSQLEngine(t *testing.T) {
	dt := time.Date(2018, 3, 14, 21, 20, 6, int(527345*time.Microsecond), time.UTC)

	t.Run("Handle interpolating $__interval and $__interval_ms", func(t *testing.T) {
		from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
		to := from.Add(5 * time.Minute)
		timeRange := backend.TimeRange{From: from, To: to}

		text := "$__interval $__timeGroupAlias(time,$__interval) $__interval_ms"

		t.Run("interpolate 10 minutes $__interval", func(t *testing.T) {
			query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Minute * 10}
			sql := Interpolate(query, timeRange, "", text)
			require.Equal(t, "10m $__timeGroupAlias(time,10m) 600000", sql)
		})

		t.Run("interpolate 4 seconds $__interval", func(t *testing.T) {
			query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 4}
			sql := Interpolate(query, timeRange, "", text)
			require.Equal(t, "4s $__timeGroupAlias(time,4s) 4000", sql)
		})

		t.Run("interpolate 200 milliseconds $__interval", func(t *testing.T) {
			query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Millisecond * 200}
			sql := Interpolate(query, timeRange, "", text)
			require.Equal(t, "200ms $__timeGroupAlias(time,200ms) 200", sql)
		})
	})

	t.Run("Given a time range between 2018-04-12 18:00 and 2018-04-12 18:05", func(t *testing.T) {
		from := time.Date(2018, 4, 12, 18, 0, 0, 0, time.UTC)
		to := from.Add(5 * time.Minute)
		timeRange := backend.TimeRange{From: from, To: to}
		query := backend.DataQuery{JSON: []byte("{}"), MaxDataPoints: 1500, Interval: time.Second * 60}

		t.Run("interpolate __unixEpochFrom function", func(t *testing.T) {
			sql := Interpolate(query, timeRange, "", "select $__unixEpochFrom()")
			require.Equal(t, fmt.Sprintf("select %d", from.Unix()), sql)
		})

		t.Run("interpolate __unixEpochTo function", func(t *testing.T) {
			sql := Interpolate(query, timeRange, "", "select $__unixEpochTo()")
			require.Equal(t, fmt.Sprintf("select %d", to.Unix()), sql)
		})
	})

	t.Run("Given row values with int64 as time columns", func(t *testing.T) {
		tSeconds := dt.Unix()
		tMilliseconds := dt.UnixNano() / 1e6
		tNanoSeconds := dt.UnixNano()
		var nilPointer *int64

		originFrame := data.NewFrame("",
			data.NewField("time1", nil, []int64{tSeconds}),
			data.NewField("time2", nil, []*int64{Pointer(tSeconds)}),
			data.NewField("time3", nil, []int64{tMilliseconds}),
			data.NewField("time4", nil, []*int64{Pointer(tMilliseconds)}),
			data.NewField("time5", nil, []int64{tNanoSeconds}),
			data.NewField("time6", nil, []*int64{Pointer(tNanoSeconds)}),
			data.NewField("time7", nil, []*int64{nilPointer}),
		)

		for i := 0; i < len(originFrame.Fields); i++ {
			err := convertSQLTimeColumnToEpochMS(originFrame, i)
			require.NoError(t, err)
		}

		require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
		require.Nil(t, originFrame.Fields[6].At(0))
	})

	t.Run("Given row values with uint64 as time columns", func(t *testing.T) {
		tSeconds := uint64(dt.Unix())
		tMilliseconds := uint64(dt.UnixNano() / 1e6)
		tNanoSeconds := uint64(dt.UnixNano())
		var nilPointer *uint64

		originFrame := data.NewFrame("",
			data.NewField("time1", nil, []uint64{tSeconds}),
			data.NewField("time2", nil, []*uint64{Pointer(tSeconds)}),
			data.NewField("time3", nil, []uint64{tMilliseconds}),
			data.NewField("time4", nil, []*uint64{Pointer(tMilliseconds)}),
			data.NewField("time5", nil, []uint64{tNanoSeconds}),
			data.NewField("time6", nil, []*uint64{Pointer(tNanoSeconds)}),
			data.NewField("time7", nil, []*uint64{nilPointer}),
		)

		for i := 0; i < len(originFrame.Fields); i++ {
			err := convertSQLTimeColumnToEpochMS(originFrame, i)
			require.NoError(t, err)
		}

		require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
		require.Nil(t, originFrame.Fields[6].At(0))
	})

	t.Run("Given row values with int32 as time columns", func(t *testing.T) {
		tSeconds := int32(dt.Unix())
		var nilInt *int32

		originFrame := data.NewFrame("",
			data.NewField("time1", nil, []int32{tSeconds}),
			data.NewField("time2", nil, []*int32{Pointer(tSeconds)}),
			data.NewField("time7", nil, []*int32{nilInt}),
		)
		for i := 0; i < 3; i++ {
			err := convertSQLTimeColumnToEpochMS(originFrame, i)
			require.NoError(t, err)
		}

		require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
		require.Nil(t, originFrame.Fields[2].At(0))
	})

	t.Run("Given row values with uint32 as time columns", func(t *testing.T) {
		tSeconds := uint32(dt.Unix())
		var nilInt *uint32

		originFrame := data.NewFrame("",
			data.NewField("time1", nil, []uint32{tSeconds}),
			data.NewField("time2", nil, []*uint32{Pointer(tSeconds)}),
			data.NewField("time7", nil, []*uint32{nilInt}),
		)
		for i := 0; i < len(originFrame.Fields); i++ {
			err := convertSQLTimeColumnToEpochMS(originFrame, i)
			require.NoError(t, err)
		}
		require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
		require.Nil(t, originFrame.Fields[2].At(0))
	})

	t.Run("Given row values with float64 as time columns", func(t *testing.T) {
		tSeconds := float64(dt.UnixNano()) / float64(time.Second)
		tMilliseconds := float64(dt.UnixNano()) / float64(time.Millisecond)
		tNanoSeconds := float64(dt.UnixNano())
		var nilPointer *float64

		originFrame := data.NewFrame("",
			data.NewField("time1", nil, []float64{tSeconds}),
			data.NewField("time2", nil, []*float64{Pointer(tSeconds)}),
			data.NewField("time3", nil, []float64{tMilliseconds}),
			data.NewField("time4", nil, []*float64{Pointer(tMilliseconds)}),
			data.NewField("time5", nil, []float64{tNanoSeconds}),
			data.NewField("time6", nil, []*float64{Pointer(tNanoSeconds)}),
			data.NewField("time7", nil, []*float64{nilPointer}),
		)

		for i := 0; i < len(originFrame.Fields); i++ {
			err := convertSQLTimeColumnToEpochMS(originFrame, i)
			require.NoError(t, err)
		}

		require.Equal(t, dt.Unix(), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[2].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[3].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[4].At(0).(*time.Time)).Unix())
		require.Equal(t, dt.Unix(), (*originFrame.Fields[5].At(0).(*time.Time)).Unix())
		require.Nil(t, originFrame.Fields[6].At(0))
	})

	t.Run("Given row values with float32 as time columns", func(t *testing.T) {
		tSeconds := float32(dt.Unix())
		var nilInt *float32

		originFrame := data.NewFrame("",
			data.NewField("time1", nil, []float32{tSeconds}),
			data.NewField("time2", nil, []*float32{Pointer(tSeconds)}),
			data.NewField("time7", nil, []*float32{nilInt}),
		)
		for i := 0; i < len(originFrame.Fields); i++ {
			err := convertSQLTimeColumnToEpochMS(originFrame, i)
			require.NoError(t, err)
		}
		require.Equal(t, int64(tSeconds), (*originFrame.Fields[0].At(0).(*time.Time)).Unix())
		require.Equal(t, int64(tSeconds), (*originFrame.Fields[1].At(0).(*time.Time)).Unix())
		require.Nil(t, originFrame.Fields[2].At(0))
	})

	t.Run("Given row with value columns, would be converted to float64", func(t *testing.T) {
		originFrame := data.NewFrame("",
			data.NewField("value1", nil, []int64{int64(1)}),
			data.NewField("value2", nil, []*int64{Pointer(int64(1))}),
			data.NewField("value3", nil, []int32{int32(1)}),
			data.NewField("value4", nil, []*int32{Pointer(int32(1))}),
			data.NewField("value5", nil, []int16{int16(1)}),
			data.NewField("value6", nil, []*int16{Pointer(int16(1))}),
			data.NewField("value7", nil, []int8{int8(1)}),
			data.NewField("value8", nil, []*int8{Pointer(int8(1))}),
			data.NewField("value9", nil, []float64{float64(1)}),
			data.NewField("value10", nil, []*float64{Pointer(1.0)}),
			data.NewField("value11", nil, []float32{float32(1)}),
			data.NewField("value12", nil, []*float32{Pointer(float32(1))}),
			data.NewField("value13", nil, []uint64{uint64(1)}),
			data.NewField("value14", nil, []*uint64{Pointer(uint64(1))}),
			data.NewField("value15", nil, []uint32{uint32(1)}),
			data.NewField("value16", nil, []*uint32{Pointer(uint32(1))}),
			data.NewField("value17", nil, []uint16{uint16(1)}),
			data.NewField("value18", nil, []*uint16{Pointer(uint16(1))}),
			data.NewField("value19", nil, []uint8{uint8(1)}),
			data.NewField("value20", nil, []*uint8{Pointer(uint8(1))}),
		)
		for i := 0; i < len(originFrame.Fields); i++ {
			_, err := convertSQLValueColumnToFloat(originFrame, i)
			require.NoError(t, err)
			if i == 8 {
				require.Equal(t, float64(1), originFrame.Fields[i].At(0).(float64))
			} else {
				require.NotNil(t, originFrame.Fields[i].At(0).(*float64))
				require.Equal(t, float64(1), *originFrame.Fields[i].At(0).(*float64))
			}
		}
	})

	t.Run("Given row with nil value columns", func(t *testing.T) {
		var int64NilPointer *int64
		var int32NilPointer *int32
		var int16NilPointer *int16
		var int8NilPointer *int8
		var float64NilPointer *float64
		var float32NilPointer *float32
		var uint64NilPointer *uint64
		var uint32NilPointer *uint32
		var uint16NilPointer *uint16
		var uint8NilPointer *uint8

		originFrame := data.NewFrame("",
			data.NewField("value1", nil, []*int64{int64NilPointer}),
			data.NewField("value2", nil, []*int32{int32NilPointer}),
			data.NewField("value3", nil, []*int16{int16NilPointer}),
			data.NewField("value4", nil, []*int8{int8NilPointer}),
			data.NewField("value5", nil, []*float64{float64NilPointer}),
			data.NewField("value6", nil, []*float32{float32NilPointer}),
			data.NewField("value7", nil, []*uint64{uint64NilPointer}),
			data.NewField("value8", nil, []*uint32{uint32NilPointer}),
			data.NewField("value9", nil, []*uint16{uint16NilPointer}),
			data.NewField("value10", nil, []*uint8{uint8NilPointer}),
		)
		for i := 0; i < len(originFrame.Fields); i++ {
			t.Run("", func(t *testing.T) {
				_, err := convertSQLValueColumnToFloat(originFrame, i)
				require.NoError(t, err)
				require.Nil(t, originFrame.Fields[i].At(0))
			})
		}
	})

	t.Run("Should not return raw connection errors", func(t *testing.T) {
		err := net.OpError{Op: "Dial", Err: fmt.Errorf("inner-error")}
		transformer := &testQueryResultTransformer{}
		dp := DataSourceHandler{
			log:                    backend.NewLoggerWith("logger", "test"),
			queryResultTransformer: transformer,
		}
		resultErr := dp.TransformQueryError(dp.log, &err)
		assert.False(t, transformer.transformQueryErrorWasCalled)
		errorText := resultErr.Error()
		assert.NotEqual(t, err, resultErr)
		assert.NotContains(t, errorText, "inner-error")
		assert.Contains(t, errorText, "failed to connect to server")
	})

	t.Run("Should return non-connection errors unmodified", func(t *testing.T) {
		err := fmt.Errorf("normal error")
		transformer := &testQueryResultTransformer{}
		dp := DataSourceHandler{
			log:                    backend.NewLoggerWith("logger", "test"),
			queryResultTransformer: transformer,
		}
		resultErr := dp.TransformQueryError(dp.log, err)
		assert.True(t, transformer.transformQueryErrorWasCalled)
		assert.Equal(t, err, resultErr)
		assert.ErrorIs(t, err, resultErr)
	})
}

func TestConvertResultsToFrame(t *testing.T) {
	// Exercises convertResultsToFrame against hand-built pgconn results, so the
	// conversion logic can be tested without a live connection.
	t.Run("convertResultsToFrame with single result", func(t *testing.T) {
		// Create mock field descriptions
		fieldDescs := []pgconn.FieldDescription{
			{Name: "id", DataTypeOID: pgtype.Int4OID},
			{Name: "name", DataTypeOID: pgtype.TextOID},
			{Name: "value", DataTypeOID: pgtype.Float8OID},
		}

		// Create mock result data
		mockRows := [][][]byte{
			{[]byte("1"), []byte("test1"), []byte("10.5")},
			{[]byte("2"), []byte("test2"), []byte("20.7")},
		}

		// Create mock result
		result := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows,
		}
		result.CommandTag = pgconn.NewCommandTag("SELECT 2")

		results := []*pgconn.Result{result}

		frame, err := convertResultsToFrame(results, 1000)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 3, len(frame.Fields))
		require.Equal(t, 2, frame.Rows())

		// Verify field names
		require.Equal(t, "id", frame.Fields[0].Name)
		require.Equal(t, "name", frame.Fields[1].Name)
		require.Equal(t, "value", frame.Fields[2].Name)
	})

	t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) {
		// Create mock field descriptions (same structure for both results)
		fieldDescs := []pgconn.FieldDescription{
			{Name: "id", DataTypeOID: pgtype.Int4OID},
			{Name: "name", DataTypeOID: pgtype.TextOID},
		}

		// Create first result
		mockRows1 := [][][]byte{
			{[]byte("1"), []byte("test1")},
			{[]byte("2"), []byte("test2")},
		}
		result1 := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows1,
		}
		result1.CommandTag = pgconn.NewCommandTag("SELECT 2")

		// Create second result with the same structure
		mockRows2 := [][][]byte{
			{[]byte("3"), []byte("test3")},
			{[]byte("4"), []byte("test4")},
		}
		result2 := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows2,
		}
		result2.CommandTag = pgconn.NewCommandTag("SELECT 2")

		results := []*pgconn.Result{result1, result2}

		frame, err := convertResultsToFrame(results, 1000)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 2, len(frame.Fields))
		require.Equal(t, 4, frame.Rows()) // Should have rows from both results

		// Verify field names
		require.Equal(t, "id", frame.Fields[0].Name)
		require.Equal(t, "name", frame.Fields[1].Name)
	})

	t.Run("convertResultsToFrame with row limit", func(t *testing.T) {
		// Create mock field descriptions
		fieldDescs := []pgconn.FieldDescription{
			{Name: "id", DataTypeOID: pgtype.Int4OID},
		}

		// Create mock result data with 3 rows
		mockRows := [][][]byte{
			{[]byte("1")},
			{[]byte("2")},
			{[]byte("3")},
		}

		result := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows,
		}
		result.CommandTag = pgconn.NewCommandTag("SELECT 3")

		results := []*pgconn.Result{result}

		// Set row limit to 2
		frame, err := convertResultsToFrame(results, 2)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 1, len(frame.Fields))
		require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows

		// Should have a notice about the limit
		require.NotNil(t, frame.Meta)
		require.Len(t, frame.Meta.Notices, 1)
		require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
	})

	t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) {
		// Create a non-SELECT result (should be skipped)
		nonSelectResult := &pgconn.Result{}
		nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1")

		// Create a SELECT result
		fieldDescs := []pgconn.FieldDescription{
			{Name: "id", DataTypeOID: pgtype.Int4OID},
		}
		mockRows := [][][]byte{
			{[]byte("1")},
		}
		selectResult := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows,
		}
		selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1")

		results := []*pgconn.Result{nonSelectResult, selectResult}

		frame, err := convertResultsToFrame(results, 1000)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 1, len(frame.Fields))
		require.Equal(t, 1, frame.Rows())
	})

	t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) {
		// Create only non-SELECT results
		result1 := &pgconn.Result{}
		result1.CommandTag = pgconn.NewCommandTag("UPDATE 1")

		result2 := &pgconn.Result{}
		result2.CommandTag = pgconn.NewCommandTag("INSERT 1")

		results := []*pgconn.Result{result1, result2}

		frame, err := convertResultsToFrame(results, 1000)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 0, len(frame.Fields))
		require.Equal(t, 0, frame.Rows())
	})

	t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) {
		// Create mock field descriptions (same structure for both results)
		fieldDescs := []pgconn.FieldDescription{
			{Name: "id", DataTypeOID: pgtype.Int4OID},
		}

		// Create first result with 3 rows
		mockRows1 := [][][]byte{
			{[]byte("1")},
			{[]byte("2")},
			{[]byte("3")},
		}
		result1 := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows1,
		}
		result1.CommandTag = pgconn.NewCommandTag("SELECT 3")

		// Create second result with 3 rows
		mockRows2 := [][][]byte{
			{[]byte("4")},
			{[]byte("5")},
			{[]byte("6")},
		}
		result2 := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows2,
		}
		result2.CommandTag = pgconn.NewCommandTag("SELECT 3")

		results := []*pgconn.Result{result1, result2}

		// Set the row limit to 2 (should limit each result to 2 rows)
		frame, err := convertResultsToFrame(results, 2)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 1, len(frame.Fields))
		require.Equal(t, 4, frame.Rows()) // 2 rows from each result

		// Should have notices about the limit from both results
		require.NotNil(t, frame.Meta)
		require.Len(t, frame.Meta.Notices, 2)
		require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
		require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2")
	})

	t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) {
		// Create mock field descriptions
		fieldDescs := []pgconn.FieldDescription{
			{Name: "id", DataTypeOID: pgtype.Int4OID},
			{Name: "name", DataTypeOID: pgtype.TextOID},
		}

		// Create mock result data with null values
		mockRows := [][][]byte{
			{[]byte("1"), nil},     // null name
			{nil, []byte("test2")}, // null id
		}

		result := &pgconn.Result{
			FieldDescriptions: fieldDescs,
			Rows:              mockRows,
		}
		result.CommandTag = pgconn.NewCommandTag("SELECT 2")

		results := []*pgconn.Result{result}

		frame, err := convertResultsToFrame(results, 1000)
		require.NoError(t, err)
		require.NotNil(t, frame)
		require.Equal(t, 2, len(frame.Fields))
		require.Equal(t, 2, frame.Rows())

		// Check that null values are handled correctly.
		// The exact representation depends on the field type, but access must not panic.
		require.NotPanics(t, func() {
			frame.Fields[0].At(1) // null id
			frame.Fields[1].At(0) // null name
		})
	})
}

type testQueryResultTransformer struct {
	transformQueryErrorWasCalled bool
}

func (t *testQueryResultTransformer) TransformQueryError(_ log.Logger, err error) error {
	t.transformQueryErrorWasCalled = true
	return err
}

func (t *testQueryResultTransformer) GetConverterList() []sqlutil.StringConverter {
	return nil
}

@ -16,56 +16,14 @@ import (
 	"github.com/grafana/grafana-plugin-sdk-go/data"
 	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
 	"github.com/grafana/grafana/pkg/services/featuremgmt"
-	"github.com/grafana/grafana/pkg/setting"
 	"github.com/jackc/pgx/v5/pgxpool"
 	"github.com/lib/pq"

 	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
+	sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx"
 	"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
 )

-func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service {
-	logger := backend.NewLoggerWith("logger", "tsdb.postgres")
-	s := &Service{
-		tlsManager:    newTLSManager(logger, cfg.DataPath),
-		pgxTlsManager: newPgxTlsManager(logger),
-		logger:        logger,
-		features:      features,
-	}
-	s.im = datasource.NewInstanceManager(s.newInstanceSettings())
-	return s
-}
-
-type Service struct {
-	tlsManager    tlsSettingsProvider
-	pgxTlsManager *pgxTlsManager
-	im            instancemgmt.InstanceManager
-	logger        log.Logger
-	features      featuremgmt.FeatureToggles
-}
-
-func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) {
-	i, err := s.im.Get(ctx, pluginCtx)
-	if err != nil {
-		return nil, err
-	}
-	instance := i.(*sqleng.DataSourceHandler)
-	return instance, nil
-}
-
-func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
-	dsInfo, err := s.getDSInfo(ctx, req.PluginContext)
-	if err != nil {
-		return nil, err
-	}
-
-	if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
-		return dsInfo.QueryDataPGX(ctx, req)
-	}
-
-	return dsInfo.QueryData(ctx, req)
-}
-
 func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*sql.DB, *sqleng.DataSourceHandler, error) {
 	connector, err := pq.NewConnector(cnnstr)
 	if err != nil {

@ -115,7 +73,7 @@ func newPostgres(ctx context.Context, userFacingDefaultError string, rowLimit in
 	return db, handler, nil
 }

-func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqleng.DataSourceHandler, error) {
+func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit int64, dsInfo sqleng.DataSourceInfo, cnnstr string, logger log.Logger, settings backend.DataSourceInstanceSettings) (*pgxpool.Pool, *sqlengpgx.DataSourceHandler, error) {
 	pgxConf, err := pgxpool.ParseConfig(cnnstr)
 	if err != nil {
 		logger.Error("postgres config creation failed", "error", err)

@ -144,7 +102,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit
 		return []string{host}, nil
 	}

-	config := sqleng.DataPluginConfiguration{
+	config := sqlengpgx.DataPluginConfiguration{
 		DSInfo:            dsInfo,
 		MetricColumnTypes: []string{"unknown", "text", "varchar", "char", "bpchar"},
 		RowLimit:          rowLimit,

@ -160,7 +118,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit
 		return nil, nil, err
 	}

-	handler, err := sqleng.NewQueryDataHandlerPGX(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb),
+	handler, err := sqlengpgx.NewQueryDataHandler(userFacingDefaultError, p, config, &queryResultTransformer, newPostgresMacroEngine(dsInfo.JsonData.Timescaledb),
 		logger)
 	if err != nil {
 		logger.Error("Failed connecting to Postgres", "err", err)

@ -171,8 +129,7 @@ func newPostgresPGX(ctx context.Context, userFacingDefaultError string, rowLimit
 	return p, handler, nil
 }

-func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc {
-	logger := s.logger
+func NewInstanceSettings(logger log.Logger, features featuremgmt.FeatureToggles, dataPath string) datasource.InstanceFactoryFunc {
 	return func(ctx context.Context, settings backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
 		cfg := backend.GrafanaConfigFromContext(ctx)
 		sqlCfg, err := cfg.SQL()

@ -210,49 +167,53 @@ func (s *Service) newInstanceSettings() datasource.InstanceFactoryFunc {
 			DecryptedSecureJSONData: settings.DecryptedSecureJSONData,
 		}

-		isPGX := s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX)
+		isPGX := features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX)

 		userFacingDefaultError, err := cfg.UserFacingDefaultError()
 		if err != nil {
 			return nil, err
 		}

-		var handler instancemgmt.Instance
 		if isPGX {
-			pgxTlsSettings, err := s.pgxTlsManager.getTLSSettings(dsInfo)
+			pgxlogger := logger.FromContext(ctx).With("driver", "pgx")
+			pgxTlsManager := newPgxTlsManager(pgxlogger)
+			pgxTlsSettings, err := pgxTlsManager.getTLSSettings(dsInfo)
 			if err != nil {
 				return "", err
 			}

 			// Ensure cleanupCertFiles is called after the connection is opened
-			defer s.pgxTlsManager.cleanupCertFiles(pgxTlsSettings)
+			defer pgxTlsManager.cleanupCertFiles(pgxTlsSettings)
-			cnnstr, err := s.generateConnectionString(dsInfo, pgxTlsSettings, isPGX)
+			cnnstr, err := generateConnectionString(dsInfo, pgxTlsSettings, isPGX, pgxlogger)
 			if err != nil {
 				return "", err
 			}
-			_, handler, err = newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings)
+			_, handler, err := newPostgresPGX(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pgxlogger, settings)
 			if err != nil {
-				logger.Error("Failed connecting to Postgres", "err", err)
+				pgxlogger.Error("Failed connecting to Postgres", "err", err)
 				return nil, err
 			}
+			pgxlogger.Debug("Successfully connected to Postgres")
+			return handler, nil
 		} else {
-			tlsSettings, err := s.tlsManager.getTLSSettings(dsInfo)
+			pqlogger := logger.FromContext(ctx).With("driver", "libpq")
+			tlsManager := newTLSManager(pqlogger, dataPath)
+			tlsSettings, err := tlsManager.getTLSSettings(dsInfo)
 			if err != nil {
 				return "", err
 			}
-			cnnstr, err := s.generateConnectionString(dsInfo, tlsSettings, isPGX)
+			cnnstr, err := generateConnectionString(dsInfo, tlsSettings, isPGX, pqlogger)
 			if err != nil {
 				return nil, err
||||||
}
|
}
|
||||||
_, handler, err = newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, logger, settings)
|
_, handler, err := newPostgres(ctx, userFacingDefaultError, sqlCfg.RowLimit, dsInfo, cnnstr, pqlogger, settings)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed connecting to Postgres", "err", err)
|
pqlogger.Error("Failed connecting to Postgres", "err", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
pqlogger.Debug("Successfully connected to Postgres")
|
||||||
|
return handler, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Debug("Successfully connected to Postgres")
|
|
||||||
return handler, nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -342,9 +303,7 @@ func buildBaseConnectionString(params connectionParams) string {
|
||||||
return connStr
|
return connStr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool) (string, error) {
|
func generateConnectionString(dsInfo sqleng.DataSourceInfo, tlsSettings tlsSettings, isPGX bool, logger log.Logger) (string, error) {
|
||||||
logger := s.logger
|
|
||||||
|
|
||||||
params, err := parseConnectionParams(dsInfo, logger)
|
params, err := parseConnectionParams(dsInfo, logger)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -387,15 +346,6 @@ func (t *postgresQueryResultTransformer) TransformQueryError(_ log.Logger, err e
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// CheckHealth pings the connected SQL database
|
|
||||||
func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
|
|
||||||
dsHandler, err := s.getDSInfo(ctx, req.PluginContext)
|
|
||||||
if err != nil {
|
|
||||||
return sqleng.ErrToHealthCheckResult(err)
|
|
||||||
}
|
|
||||||
return dsHandler.CheckHealth(ctx, req, s.features)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *postgresQueryResultTransformer) GetConverterList() []sqlutil.StringConverter {
|
func (t *postgresQueryResultTransformer) GetConverterList() []sqlutil.StringConverter {
|
||||||
return []sqlutil.StringConverter{
|
return []sqlutil.StringConverter{
|
||||||
{
|
{
|
||||||
|
|
|
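The instance factory above routes every new datasource instance to one of two drivers based on the PostgresDSUsePGX feature toggle, with the toggle read at instance-creation time. A minimal, self-contained sketch of that routing pattern follows; the names (featureChecker, pgxHandler, libpqHandler, newInstanceFactory) are illustrative stand-ins and not code from this commit.

package main

import (
	"context"
	"fmt"
)

// featureChecker stands in for featuremgmt.FeatureToggles: the factory only
// needs a yes/no answer for a single flag when an instance is built.
type featureChecker func(ctx context.Context, flag string) bool

// handler stands in for the two concrete DataSourceHandler types.
type handler interface{ Name() string }

type pgxHandler struct{}

func (pgxHandler) Name() string { return "pgx" }

type libpqHandler struct{}

func (libpqHandler) Name() string { return "lib/pq" }

// newInstanceFactory mirrors the shape of NewInstanceSettings above: the flag
// is consulted per instance, so flipping it only affects instances created
// afterwards, and each branch owns its driver-specific setup (TLS, connection
// string, pool) without leaking into the other.
func newInstanceFactory(enabled featureChecker) func(ctx context.Context) (handler, error) {
	return func(ctx context.Context) (handler, error) {
		if enabled(ctx, "postgresDSUsePGX") {
			return pgxHandler{}, nil // pgx pool construction would go here
		}
		return libpqHandler{}, nil // lib/pq connector and sql.DB would go here
	}
}

func main() {
	factory := newInstanceFactory(func(ctx context.Context, flag string) bool { return true })
	h, err := factory(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("instance built with driver:", h.Name()) // prints: pgx
}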
@@ -186,7 +186,7 @@ func TestIntegrationPostgresPGXSnapshots(t *testing.T) {
|
query := makeQuery(rawSQL, test.format)	|	query := makeQuery(rawSQL, test.format)
|
result, err := handler.QueryDataPGX(context.Background(), &query)	|	result, err := handler.QueryData(context.Background(), &query)
require.Len(t, result.Responses, 1)	|	require.Len(t, result.Responses, 1)
response, found := result.Responses["A"]	|	response, found := result.Responses["A"]
require.True(t, found)	|	require.True(t, found)
|
@@ -151,10 +151,6 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) {
}	|	}
for _, tt := range testCases {	|	for _, tt := range testCases {
t.Run(tt.desc, func(t *testing.T) {	|	t.Run(tt.desc, func(t *testing.T) {
svc := Service{	|
logger: backend.NewLoggerWith("logger", "tsdb.postgres"),	|
}	|
|
ds := sqleng.DataSourceInfo{	|	ds := sqleng.DataSourceInfo{
URL: tt.host,	|	URL: tt.host,
User: tt.user,	|	User: tt.user,
@@ -162,8 +158,9 @@ func TestIntegrationGenerateConnectionStringPGX(t *testing.T) {
Database: tt.database,	|	Database: tt.database,
UID: tt.uid,	|	UID: tt.uid,
}	|	}
|	logger := backend.NewLoggerWith("logger", "tsdb.postgres")
|
connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false)	|	connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger)
|
if tt.expErr == "" {	|	if tt.expErr == "" {
require.NoError(t, err, tt.desc)	|	require.NoError(t, err, tt.desc)
@@ -284,7 +281,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
},	|	},
}	|	}
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -383,7 +380,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -426,7 +423,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
},	|	},
}	|	}
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -460,7 +457,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
frames := queryResult.Frames	|	frames := queryResult.Frames
@@ -488,7 +485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -542,7 +539,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -589,7 +586,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -624,7 +621,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -741,7 +738,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -765,7 +762,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -789,7 +786,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -813,7 +810,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -837,7 +834,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -861,7 +858,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -885,7 +882,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -910,7 +907,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -934,7 +931,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -959,7 +956,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -991,7 +988,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1026,7 +1023,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1086,7 +1083,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
|
queryResult := resp.Responses["Deploys"]	|	queryResult := resp.Responses["Deploys"]
@@ -1113,7 +1110,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
|
queryResult := resp.Responses["Tickets"]	|	queryResult := resp.Responses["Tickets"]
@@ -1136,7 +1133,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1161,7 +1158,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1186,7 +1183,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1212,7 +1209,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1238,7 +1235,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1264,7 +1261,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1290,7 +1287,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1338,7 +1335,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := handler.QueryDataPGX(t.Context(), query)	|	resp, err := handler.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1368,7 +1365,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := handler.QueryDataPGX(t.Context(), query)	|	resp, err := handler.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1406,7 +1403,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
},	|	},
}	|	}
|
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
|
@@ -1453,7 +1450,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}	|	}
|
// This should not panic and should work correctly	|	// This should not panic and should work correctly
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
@@ -1488,7 +1485,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}	|	}
|
// This should not panic anymore, but should return an error instead	|	// This should not panic anymore, but should return an error instead
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
|
@@ -1517,7 +1514,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}	|	}
|
// This should not panic anymore, but should return an error instead	|	// This should not panic anymore, but should return an error instead
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
|
@@ -1546,7 +1543,7 @@ func TestIntegrationPostgresPGX(t *testing.T) {
}	|	}
|
// This should not panic	|	// This should not panic
resp, err := exe.QueryDataPGX(t.Context(), query)	|	resp, err := exe.QueryData(t.Context(), query)
require.NoError(t, err)	|	require.NoError(t, err)
queryResult := resp.Responses["A"]	|	queryResult := resp.Responses["A"]
require.NoError(t, queryResult.Error)	|	require.NoError(t, queryResult.Error)
|
@@ -0,0 +1,80 @@
|	package postgres
|
|	import (
|	"context"
|
|	"github.com/grafana/grafana-plugin-sdk-go/backend"
|	"github.com/grafana/grafana-plugin-sdk-go/backend/datasource"
|	"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
|
|	"github.com/grafana/grafana/pkg/services/featuremgmt"
|	"github.com/grafana/grafana/pkg/setting"
|	sqlengpgx "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/pgx"
|	"github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource/sqleng"
|	)
|
|	type Service struct {
|	im instancemgmt.InstanceManager
|	features featuremgmt.FeatureToggles
|	}
|
|	func ProvideService(cfg *setting.Cfg, features featuremgmt.FeatureToggles) *Service {
|	logger := backend.NewLoggerWith("logger", "tsdb.postgres")
|	s := &Service{
|	im: datasource.NewInstanceManager(NewInstanceSettings(logger, features, cfg.DataPath)),
|	features: features,
|	}
|	return s
|	}
|
|	// NOTE: do not put any business logic into this method. its whole job is to forward the call "inside"
|	func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
|	if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
|	dsHandler, err := s.getDSInfoPGX(ctx, req.PluginContext)
|	if err != nil {
|	return sqlengpgx.ErrToHealthCheckResult(err)
|	}
|	return dsHandler.CheckHealth(ctx, req)
|	} else {
|	dsHandler, err := s.getDSInfo(ctx, req.PluginContext)
|	if err != nil {
|	return sqleng.ErrToHealthCheckResult(err)
|	}
|	return dsHandler.CheckHealth(ctx, req)
|	}
|	}
|
|	// NOTE: do not put any business logic into this method. its whole job is to forward the call "inside"
|	func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
|	if s.features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {
|	dsInfo, err := s.getDSInfoPGX(ctx, req.PluginContext)
|	if err != nil {
|	return nil, err
|	}
|	return dsInfo.QueryData(ctx, req)
|	} else {
|	dsInfo, err := s.getDSInfo(ctx, req.PluginContext)
|	if err != nil {
|	return nil, err
|	}
|	return dsInfo.QueryData(ctx, req)
|	}
|	}
|
|	func (s *Service) getDSInfo(ctx context.Context, pluginCtx backend.PluginContext) (*sqleng.DataSourceHandler, error) {
|	i, err := s.im.Get(ctx, pluginCtx)
|	if err != nil {
|	return nil, err
|	}
|	instance := i.(*sqleng.DataSourceHandler)
|	return instance, nil
|	}
|
|	func (s *Service) getDSInfoPGX(ctx context.Context, pluginCtx backend.PluginContext) (*sqlengpgx.DataSourceHandler, error) {
|	i, err := s.im.Get(ctx, pluginCtx)
|	if err != nil {
|	return nil, err
|	}
|	instance := i.(*sqlengpgx.DataSourceHandler)
|	return instance, nil
|	}
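The new Service above keeps CheckHealth and QueryData as thin forwarders and recovers the concrete handler from the instance manager with a bare type assertion (i.(*sqleng.DataSourceHandler)), which panics if the stored instance has a different type. A small sketch of that instance-manager access pattern follows; it deliberately uses the comma-ok form instead, so a mismatch surfaces as an error. All names here (instanceGetter, mapManager, pgxDataSourceHandler) are hypothetical stand-ins, not code from this commit.

package main

import (
	"context"
	"fmt"
)

// instanceGetter stands in for instancemgmt.InstanceManager.Get, which hands
// back an untyped instance that each accessor must assert to a concrete type.
type instanceGetter interface {
	Get(ctx context.Context) (any, error)
}

type pgxDataSourceHandler struct{ name string }

// mapManager is a trivial in-memory stand-in for the SDK's instance manager.
type mapManager struct{ instance any }

func (m mapManager) Get(ctx context.Context) (any, error) { return m.instance, nil }

// getDSInfoPGX mirrors the accessor shape above: one accessor per driver, so a
// request routed through the wrong accessor fails loudly instead of silently.
func getDSInfoPGX(ctx context.Context, im instanceGetter) (*pgxDataSourceHandler, error) {
	i, err := im.Get(ctx)
	if err != nil {
		return nil, err
	}
	instance, ok := i.(*pgxDataSourceHandler)
	if !ok {
		return nil, fmt.Errorf("unexpected instance type %T", i)
	}
	return instance, nil
}

func main() {
	im := mapManager{instance: &pgxDataSourceHandler{name: "pgx"}}
	h, err := getDSInfoPGX(context.Background(), im)
	if err != nil {
		panic(err)
	}
	fmt.Println(h.name) // prints: pgx
}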
|
@@ -156,10 +156,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) {
}	|	}
for _, tt := range testCases {	|	for _, tt := range testCases {
t.Run(tt.desc, func(t *testing.T) {	|	t.Run(tt.desc, func(t *testing.T) {
svc := Service{	|	logger := backend.NewLoggerWith("logger", "tsdb.postgres")
tlsManager: &tlsTestManager{settings: tt.tlsSettings},	|
logger: backend.NewLoggerWith("logger", "tsdb.postgres"),	|
}	|
|
ds := sqleng.DataSourceInfo{	|	ds := sqleng.DataSourceInfo{
URL: tt.host,	|	URL: tt.host,
@@ -169,7 +166,7 @@ func TestIntegrationGenerateConnectionString(t *testing.T) {
UID: tt.uid,	|	UID: tt.uid,
}	|	}
|
connStr, err := svc.generateConnectionString(ds, tt.tlsSettings, false)	|	connStr, err := generateConnectionString(ds, tt.tlsSettings, false, logger)
|
if tt.expErr == "" {	|	if tt.expErr == "" {
require.NoError(t, err, tt.desc)	|	require.NoError(t, err, tt.desc)
@@ -1409,14 +1406,6 @@ func genTimeRangeByInterval(from time.Time, duration time.Duration, interval tim
return timeRange	|	return timeRange
}	|	}
|
type tlsTestManager struct {	|
settings tlsSettings	|
}	|
|
func (m *tlsTestManager) getTLSSettings(dsInfo sqleng.DataSourceInfo) (tlsSettings, error) {	|
return m.settings, nil	|
}	|
|
func isTestDbPostgres() bool {	|	func isTestDbPostgres() bool {
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {	|	if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
return db == "postgres"	|	return db == "postgres"
|
@@ -10,17 +10,11 @@ import (
|
"github.com/grafana/grafana-plugin-sdk-go/backend"	|	"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"	|	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana/pkg/services/featuremgmt"	|
"github.com/lib/pq"	|	"github.com/lib/pq"
)	|	)
|
func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest, features featuremgmt.FeatureToggles) (*backend.CheckHealthResult, error) {	|	func (e *DataSourceHandler) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
var err error	|	err := e.Ping()
if features.IsEnabled(ctx, featuremgmt.FlagPostgresDSUsePGX) {	|
err = e.PingPGX(ctx)	|
} else {	|
err = e.Ping()	|
}	|
if err != nil {	|	if err != nil {
logCheckHealthError(ctx, e.dsInfo, err)	|	logCheckHealthError(ctx, e.dsInfo, err)
if strings.EqualFold(req.PluginContext.User.Role, "Admin") {	|	if strings.EqualFold(req.PluginContext.User.Role, "Admin") {
|
@@ -19,7 +19,6 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/log"	|	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/data"	|	"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"	|	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/jackc/pgx/v5/pgxpool"	|
)	|	)
|
// MetaKeyExecutedQueryString is the key where the executed query should get stored	|	// MetaKeyExecutedQueryString is the key where the executed query should get stored
@@ -89,7 +88,6 @@ type DataSourceHandler struct {
dsInfo DataSourceInfo	|	dsInfo DataSourceInfo
rowLimit int64	|	rowLimit int64
userError string	|	userError string
pool *pgxpool.Pool	|
}	|	}
|
type QueryJson struct {	|	type QueryJson struct {
@@ -490,7 +488,6 @@ type dataQueryModel struct {
Interval time.Duration	|	Interval time.Duration
columnNames []string	|	columnNames []string
columnTypes []*sql.ColumnType	|	columnTypes []*sql.ColumnType
columnTypesPGX []string	|
timeIndex int	|	timeIndex int
timeEndIndex int	|	timeEndIndex int
metricIndex int	|	metricIndex int
|
@@ -9,8 +9,6 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"	|	"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/data"	|	"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"	|	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
"github.com/jackc/pgx/v5/pgconn"	|
"github.com/jackc/pgx/v5/pgtype"	|
"github.com/stretchr/testify/assert"	|	"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"	|	"github.com/stretchr/testify/require"
|
@@ -427,246 +425,6 @@ func TestSQLEngine(t *testing.T) {
})	|	})
}	|	}
|
func TestConvertResultsToFrame(t *testing.T) {	|
// Import the pgx packages needed for testing	|
// These imports are included in the main file but need to be accessible for tests	|
t.Run("convertResultsToFrame with single result", func(t *testing.T) {	|
// Create mock field descriptions	|
fieldDescs := []pgconn.FieldDescription{	|
{Name: "id", DataTypeOID: pgtype.Int4OID},	|
{Name: "name", DataTypeOID: pgtype.TextOID},	|
{Name: "value", DataTypeOID: pgtype.Float8OID},	|
}	|
|
// Create mock result data	|
mockRows := [][][]byte{	|
{[]byte("1"), []byte("test1"), []byte("10.5")},	|
{[]byte("2"), []byte("test2"), []byte("20.7")},	|
}	|
|
// Create mock result	|
result := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows,	|
}	|
result.CommandTag = pgconn.NewCommandTag("SELECT 2")	|
|
results := []*pgconn.Result{result}	|
|
frame, err := convertResultsToFrame(results, 1000)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 3, len(frame.Fields))	|
require.Equal(t, 2, frame.Rows())	|
|
// Verify field names	|
require.Equal(t, "id", frame.Fields[0].Name)	|
require.Equal(t, "name", frame.Fields[1].Name)	|
require.Equal(t, "value", frame.Fields[2].Name)	|
})	|
|
t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) {	|
// Create mock field descriptions (same structure for both results)	|
fieldDescs := []pgconn.FieldDescription{	|
{Name: "id", DataTypeOID: pgtype.Int4OID},	|
{Name: "name", DataTypeOID: pgtype.TextOID},	|
}	|
|
// Create first result	|
mockRows1 := [][][]byte{	|
{[]byte("1"), []byte("test1")},	|
{[]byte("2"), []byte("test2")},	|
}	|
result1 := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows1,	|
}	|
result1.CommandTag = pgconn.NewCommandTag("SELECT 2")	|
|
// Create second result with same structure	|
mockRows2 := [][][]byte{	|
{[]byte("3"), []byte("test3")},	|
{[]byte("4"), []byte("test4")},	|
}	|
result2 := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows2,	|
}	|
result2.CommandTag = pgconn.NewCommandTag("SELECT 2")	|
|
results := []*pgconn.Result{result1, result2}	|
|
frame, err := convertResultsToFrame(results, 1000)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 2, len(frame.Fields))	|
require.Equal(t, 4, frame.Rows()) // Should have rows from both results	|
|
// Verify field names	|
require.Equal(t, "id", frame.Fields[0].Name)	|
require.Equal(t, "name", frame.Fields[1].Name)	|
})	|
|
t.Run("convertResultsToFrame with row limit", func(t *testing.T) {	|
// Create mock field descriptions	|
fieldDescs := []pgconn.FieldDescription{	|
{Name: "id", DataTypeOID: pgtype.Int4OID},	|
}	|
|
// Create mock result data with 3 rows	|
mockRows := [][][]byte{	|
{[]byte("1")},	|
{[]byte("2")},	|
{[]byte("3")},	|
}	|
|
result := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows,	|
}	|
result.CommandTag = pgconn.NewCommandTag("SELECT 3")	|
|
results := []*pgconn.Result{result}	|
|
// Set row limit to 2	|
frame, err := convertResultsToFrame(results, 2)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 1, len(frame.Fields))	|
require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows	|
|
// Should have a notice about the limit	|
require.NotNil(t, frame.Meta)	|
require.Len(t, frame.Meta.Notices, 1)	|
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")	|
})	|
|
t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) {	|
// Create a non-SELECT result (should be skipped)	|
nonSelectResult := &pgconn.Result{}	|
nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1")	|
|
// Create a SELECT result	|
fieldDescs := []pgconn.FieldDescription{	|
{Name: "id", DataTypeOID: pgtype.Int4OID},	|
}	|
mockRows := [][][]byte{	|
{[]byte("1")},	|
}	|
selectResult := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows,	|
}	|
selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1")	|
|
results := []*pgconn.Result{nonSelectResult, selectResult}	|
|
frame, err := convertResultsToFrame(results, 1000)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 1, len(frame.Fields))	|
require.Equal(t, 1, frame.Rows())	|
})	|
|
t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) {	|
// Create only non-SELECT results	|
result1 := &pgconn.Result{}	|
result1.CommandTag = pgconn.NewCommandTag("UPDATE 1")	|
|
result2 := &pgconn.Result{}	|
result2.CommandTag = pgconn.NewCommandTag("INSERT 1")	|
|
results := []*pgconn.Result{result1, result2}	|
|
frame, err := convertResultsToFrame(results, 1000)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 0, len(frame.Fields))	|
require.Equal(t, 0, frame.Rows())	|
})	|
|
t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) {	|
// Create mock field descriptions (same structure for both results)	|
fieldDescs := []pgconn.FieldDescription{	|
{Name: "id", DataTypeOID: pgtype.Int4OID},	|
}	|
|
// Create first result with 3 rows	|
mockRows1 := [][][]byte{	|
{[]byte("1")},	|
{[]byte("2")},	|
{[]byte("3")},	|
}	|
result1 := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows1,	|
}	|
result1.CommandTag = pgconn.NewCommandTag("SELECT 3")	|
|
// Create second result with 3 rows	|
mockRows2 := [][][]byte{	|
{[]byte("4")},	|
{[]byte("5")},	|
{[]byte("6")},	|
}	|
result2 := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows2,	|
}	|
result2.CommandTag = pgconn.NewCommandTag("SELECT 3")	|
|
results := []*pgconn.Result{result1, result2}	|
|
// Set row limit to 2 (should limit each result to 2 rows)	|
frame, err := convertResultsToFrame(results, 2)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 1, len(frame.Fields))	|
require.Equal(t, 4, frame.Rows()) // 2 rows from each result	|
|
// Should have notices about the limit from both results	|
require.NotNil(t, frame.Meta)	|
require.Len(t, frame.Meta.Notices, 2)	|
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")	|
require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2")	|
})	|
|
t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) {	|
// Create mock field descriptions	|
fieldDescs := []pgconn.FieldDescription{	|
{Name: "id", DataTypeOID: pgtype.Int4OID},	|
{Name: "name", DataTypeOID: pgtype.TextOID},	|
}	|
|
// Create mock result data with null values	|
mockRows := [][][]byte{	|
{[]byte("1"), nil}, // null name	|
{nil, []byte("test2")}, // null id	|
}	|
|
result := &pgconn.Result{	|
FieldDescriptions: fieldDescs,	|
Rows: mockRows,	|
}	|
result.CommandTag = pgconn.NewCommandTag("SELECT 2")	|
|
results := []*pgconn.Result{result}	|
|
frame, err := convertResultsToFrame(results, 1000)	|
require.NoError(t, err)	|
require.NotNil(t, frame)	|
require.Equal(t, 2, len(frame.Fields))	|
require.Equal(t, 2, frame.Rows())	|
|
// Check that null values are handled correctly	|
// The exact representation depends on the field type, but should not panic	|
require.NotPanics(t, func() {	|
frame.Fields[0].At(1) // null id	|
frame.Fields[1].At(0) // null name	|
})	|
})	|
}	|
|
type testQueryResultTransformer struct {	|	type testQueryResultTransformer struct {
transformQueryErrorWasCalled bool	|	transformQueryErrorWasCalled bool
}	|	}
|
@@ -0,0 +1,25 @@
|	package main
|
|	import (
|	"os"
|
|	"github.com/grafana/grafana-plugin-sdk-go/backend"
|	"github.com/grafana/grafana-plugin-sdk-go/backend/datasource"
|	"github.com/grafana/grafana-plugin-sdk-go/backend/log"
|	"github.com/grafana/grafana/pkg/services/featuremgmt"
|	"github.com/grafana/grafana/pkg/setting"
|	postgres "github.com/grafana/grafana/pkg/tsdb/grafana-postgresql-datasource"
|	)
|
|	func main() {
|	// No need to pass logger name, it will be set by the plugin SDK
|	logger := backend.NewLoggerWith()
|	// TODO: get rid of setting.NewCfg() and featuremgmt.FeatureToggles once PostgresDSUsePGX is removed
|	cfg := setting.NewCfg()
|	// We want to enable the feature toggle for api server
|	features := featuremgmt.WithFeatures(featuremgmt.FlagPostgresDSUsePGX)
|	if err := datasource.Manage("grafana-postgresql-datasource", postgres.NewInstanceSettings(logger, features, cfg.DataPath), datasource.ManageOpts{}); err != nil {
|	log.DefaultLogger.Error(err.Error())
|	os.Exit(1)
|	}
|	}
|
@@ -294,7 +294,13 @@ func (s *Service) handleFunctions(ctx context.Context, dsInfo *datasourceInfo, _
|
_, rawBody, statusCode, err := doGraphiteRequest[map[string]any](ctx, dsInfo, s.logger, req, true)	|	_, rawBody, statusCode, err := doGraphiteRequest[map[string]any](ctx, dsInfo, s.logger, req, true)
if err != nil {	|	if err != nil {
return nil, statusCode, fmt.Errorf("version request failed: %v", err)	|	return nil, statusCode, fmt.Errorf("functions request failed: %v", err)
|	}
|
|	// It's possible that an HTML response may be returned
|	// This isn't valid so we'll return an error and use the default functions
|	if strings.HasPrefix(string(*rawBody), "<") {
|	return []byte{}, http.StatusNotAcceptable, fmt.Errorf("invalid functions response received from Graphite")
}	|	}
|
if rawBody == nil {	|	if rawBody == nil {
|
@@ -735,21 +735,41 @@ func TestHandleFunctions(t *testing.T) {
responseBody: `{"error": "internal error"}`,	|	responseBody: `{"error": "internal error"}`,
statusCode: 500,	|	statusCode: 500,
expectError: true,	|	expectError: true,
errorContains: "version request failed",	|	errorContains: "functions request failed",
},	|	},
{	|	{
name: "functions request not found",	|	name: "functions request not found",
responseBody: `{"error": "not found"}`,	|	responseBody: `{"error": "not found"}`,
statusCode: 404,	|	statusCode: 404,
expectError: true,	|	expectError: true,
errorContains: "version request failed",	|	errorContains: "functions request failed",
},	|	},
{	|	{
name: "network error",	|	name: "network error",
responseBody: "",	|	responseBody: "",
statusCode: 0,	|	statusCode: 0,
expectError: true,	|	expectError: true,
errorContains: "version request failed",	|	errorContains: "functions request failed",
|	},
|	{
|	name: "html response",
|	responseBody: `<html>
|	<head>
|	<title>Graphite Browser</title>
|	</head>
|
|
|	<frameset rows="60,*" frameborder="1" border="1">
|	<frame src="/browser/header/" name="Header" id='header' scrolling="no" noresize="true" />
|
|	<frame src="/composer/?" name="content" id="composerFrame"/>
|
|	</frameset>
|	</html>
|	`,
|	statusCode: 200,
|	expectError: true,
|	errorContains: "invalid functions response received from Graphite",
},	|	},
}	|	}
|
@@ -67,7 +67,7 @@ func convertSQLite3URL(dsn string) (string, error) {
newDSN := dsn[:pos]	|	newDSN := dsn[:pos]
|
q := url.Values{}	|	q := url.Values{}
q.Add("_pragma", "busy_timeout(5000)")	|	q.Add("_pragma", "busy_timeout(7500)") // Default of mattn/go-sqlite3 is 5s but we increase it to 7.5s to try and avoid busy errors.
|
for key, values := range params {	|	for key, values := range params {
if alias, ok := dsnAlias[strings.ToLower(key)]; ok {	|	if alias, ok := dsnAlias[strings.ToLower(key)]; ok {
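The SQLite change above only raises the busy_timeout pragma from 5000 to 7500 ms. For context, here is a sketch of how such a DSN can be assembled with net/url so the pragma survives alongside caller-supplied parameters; buildDSN and the base DSN are hypothetical illustrations, and only the 7500 value comes from the diff.

package main

import (
	"fmt"
	"net/url"
)

// buildDSN sketches the convertSQLite3URL step above: pragmas travel as
// repeated _pragma query parameters, so busy_timeout rides along with any
// options the caller already set on the DSN.
func buildDSN(base string, extra url.Values) string {
	q := url.Values{}
	q.Add("_pragma", "busy_timeout(7500)") // raised from 5000ms to ride out short lock contention
	for key, values := range extra {
		for _, v := range values {
			q.Add(key, v)
		}
	}
	return base + "?" + q.Encode()
}

func main() {
	dsn := buildDSN("file:grafana.db", url.Values{"cache": {"shared"}})
	fmt.Println(dsn) // file:grafana.db?_pragma=busy_timeout%287500%29&cache=shared
}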
@@ -7,7 +7,9 @@ import { Button, Drawer, Dropdown, Icon, Menu, MenuItem } from '@grafana/ui';
import { Permissions } from 'app/core/components/AccessControl';	|	import { Permissions } from 'app/core/components/AccessControl';
import { appEvents } from 'app/core/core';	|	import { appEvents } from 'app/core/core';
import { RepoType } from 'app/features/provisioning/Wizard/types';	|	import { RepoType } from 'app/features/provisioning/Wizard/types';
|	import { BulkMoveProvisionedResource } from 'app/features/provisioning/components/BulkActions/BulkMoveProvisionedResource';
import { DeleteProvisionedFolderForm } from 'app/features/provisioning/components/Folders/DeleteProvisionedFolderForm';	|	import { DeleteProvisionedFolderForm } from 'app/features/provisioning/components/Folders/DeleteProvisionedFolderForm';
|	import { useIsProvisionedInstance } from 'app/features/provisioning/hooks/useIsProvisionedInstance';
import { getReadOnlyTooltipText } from 'app/features/provisioning/utils/repository';	|	import { getReadOnlyTooltipText } from 'app/features/provisioning/utils/repository';
import { ShowModalReactEvent } from 'app/types/events';	|	import { ShowModalReactEvent } from 'app/types/events';
import { FolderDTO } from 'app/types/folders';	|	import { FolderDTO } from 'app/types/folders';
@@ -29,14 +31,18 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
const [isOpen, setIsOpen] = useState(false);	|	const [isOpen, setIsOpen] = useState(false);
const [showPermissionsDrawer, setShowPermissionsDrawer] = useState(false);	|	const [showPermissionsDrawer, setShowPermissionsDrawer] = useState(false);
const [showDeleteProvisionedFolderDrawer, setShowDeleteProvisionedFolderDrawer] = useState(false);	|	const [showDeleteProvisionedFolderDrawer, setShowDeleteProvisionedFolderDrawer] = useState(false);
|	const [showMoveProvisionedFolderDrawer, setShowMoveProvisionedFolderDrawer] = useState(false);
const [moveFolder] = useMoveFolderMutationFacade();	|	const [moveFolder] = useMoveFolderMutationFacade();
|	const isProvisionedInstance = useIsProvisionedInstance();
|
const deleteFolder = useDeleteFolderMutationFacade();	|	const deleteFolder = useDeleteFolderMutationFacade();
|
const { canEditFolders, canDeleteFolders, canViewPermissions, canSetPermissions } = getFolderPermissions(folder);	|	const { canEditFolders, canDeleteFolders, canViewPermissions, canSetPermissions } = getFolderPermissions(folder);
const isProvisionedFolder = folder.managedBy === ManagerKind.Repo;	|	const isProvisionedFolder = folder.managedBy === ManagerKind.Repo;
|	// When it's a single provisioned folder, the root repository folder cannot be moved
|	const isProvisionedRootFolder = isProvisionedFolder && !isProvisionedInstance && folder.parentUid === undefined;
// Can only move folders when the folder is not provisioned	|	// Can only move folders when the folder is not provisioned
const canMoveFolder = canEditFolders && !isProvisionedFolder;	|	const canMoveFolder = canEditFolders && !isProvisionedRootFolder;
|
const onMove = async (destinationUID: string) => {	|	const onMove = async (destinationUID: string) => {
await moveFolder({ folderUID: folder.uid, destinationUID: destinationUID });	|	await moveFolder({ folderUID: folder.uid, destinationUID: destinationUID });
@@ -115,6 +121,10 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
setShowDeleteProvisionedFolderDrawer(true);	|	setShowDeleteProvisionedFolderDrawer(true);
};	|	};
|
|	const handleShowMoveProvisionedFolderDrawer = () => {
|	setShowMoveProvisionedFolderDrawer(true);
|	};
|
const managePermissionsLabel = t('browse-dashboards.folder-actions-button.manage-permissions', 'Manage permissions');	|	const managePermissionsLabel = t('browse-dashboards.folder-actions-button.manage-permissions', 'Manage permissions');
const moveLabel = t('browse-dashboards.folder-actions-button.move', 'Move');	|	const moveLabel = t('browse-dashboards.folder-actions-button.move', 'Move');
const deleteLabel = t('browse-dashboards.folder-actions-button.delete', 'Delete');	|	const deleteLabel = t('browse-dashboards.folder-actions-button.delete', 'Delete');
@@ -122,7 +132,12 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
const menu = (	|	const menu = (
<Menu>	|	<Menu>
{canViewPermissions && <MenuItem onClick={() => setShowPermissionsDrawer(true)} label={managePermissionsLabel} />}	|	{canViewPermissions && <MenuItem onClick={() => setShowPermissionsDrawer(true)} label={managePermissionsLabel} />}
{canMoveFolder && !isReadOnlyRepo && <MenuItem onClick={showMoveModal} label={moveLabel} />}	|	{canMoveFolder && !isReadOnlyRepo && (
|	<MenuItem
|	onClick={isProvisionedFolder ? handleShowMoveProvisionedFolderDrawer : showMoveModal}
|	label={moveLabel}
|	/>
|	)}
{canDeleteFolders && !isReadOnlyRepo && (	|	{canDeleteFolders && !isReadOnlyRepo && (
<MenuItem	|	<MenuItem
destructive	|	destructive
@@ -175,6 +190,19 @@ export function FolderActionsButton({ folder, repoType, isReadOnlyRepo }: Props)
/>	|	/>
</Drawer>	|	</Drawer>
)}	|	)}
|	{showMoveProvisionedFolderDrawer && (
|	<Drawer
|	title={t('browse-dashboards.action.move-provisioned-folder', 'Move provisioned folder')}
|	subtitle={folder.title}
|	onClose={() => setShowMoveProvisionedFolderDrawer(false)}
|	>
|	<BulkMoveProvisionedResource
|	folderUid={folder.uid}
|	selectedItems={{ dashboard: {}, folder: { [folder.uid]: true } }}
|	onDismiss={() => setShowMoveProvisionedFolderDrawer(false)}
|	/>
|	</Drawer>
|	)}
</>	|	</>
);	|	);
}	|	}
|
@@ -4,6 +4,7 @@ import { PropsWithChildren } from 'react';
import { CoreApp, DataQueryRequest, dateTime, LoadingState, PanelData, toDataFrame } from '@grafana/data';	|	import { CoreApp, DataQueryRequest, dateTime, LoadingState, PanelData, toDataFrame } from '@grafana/data';
import { DataQuery } from '@grafana/schema';	|	import { DataQuery } from '@grafana/schema';
import { mockDataSource } from 'app/features/alerting/unified/mocks';	|	import { mockDataSource } from 'app/features/alerting/unified/mocks';
|	import { ExpressionDatasourceUID } from 'app/features/expressions/types';
|
import { filterPanelDataToQuery, Props, QueryEditorRow } from './QueryEditorRow';	|	import { filterPanelDataToQuery, Props, QueryEditorRow } from './QueryEditorRow';
|
@@ -464,5 +465,28 @@ describe('QueryEditorRow', () => {
expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument();	|	expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument();
});	|	});
});	|	});
|
|	it('should not render saved queries buttons when query is an expression query', async () => {
|	const expressionQuery = {
|	refId: 'B',
|	datasource: {
|	uid: ExpressionDatasourceUID,
|	type: '__expr__',
|	},
|	};
|
|	const expressionProps = {
|	...props(testData),
|	query: expressionQuery,
|	queries: [expressionQuery],
|	};
|
|	render(<QueryEditorRow {...expressionProps} />);
|
|	await waitFor(() => {
|	expect(screen.queryByText('Save query')).not.toBeInTheDocument();
|	expect(screen.queryByText('Replace with saved query')).not.toBeInTheDocument();
|	});
|	});
});	|	});
});	|	});
|
@@ -34,6 +34,7 @@ import {
} from 'app/core/components/QueryOperationRow/QueryOperationRow';	|	} from 'app/core/components/QueryOperationRow/QueryOperationRow';
|
import { useQueryLibraryContext } from '../../explore/QueryLibrary/QueryLibraryContext';	|	import { useQueryLibraryContext } from '../../explore/QueryLibrary/QueryLibraryContext';
|	import { ExpressionDatasourceUID } from '../../expressions/types';
|
import { QueryActionComponent, RowActionComponents } from './QueryActionComponent';	|	import { QueryActionComponent, RowActionComponents } from './QueryActionComponent';
import { QueryEditorRowHeader } from './QueryEditorRowHeader';	|	import { QueryEditorRowHeader } from './QueryEditorRowHeader';
@@ -386,10 +387,11 @@ export class QueryEditorRow<TQuery extends DataQuery> extends PureComponent<Prop
const hasEditorHelp = datasource?.components?.QueryEditorHelp;	|	const hasEditorHelp = datasource?.components?.QueryEditorHelp;
const isEditingQueryLibrary = queryLibraryRef !== undefined;	|	const isEditingQueryLibrary = queryLibraryRef !== undefined;
const isUnifiedAlerting = app === CoreApp.UnifiedAlerting;	|	const isUnifiedAlerting = app === CoreApp.UnifiedAlerting;
|	const isExpressionQuery = query.datasource?.uid === ExpressionDatasourceUID;
|
return (	|	return (
<>	|	<>
{!isEditingQueryLibrary && !isUnifiedAlerting && (	|	{!isEditingQueryLibrary && !isUnifiedAlerting && !isExpressionQuery && (
<SavedQueryButtons	|	<SavedQueryButtons
query={query}	|	query={query}
app={app}	|	app={app}
|
|
||||||
|
|
||||||
import { GrafanaTheme2 } from '@grafana/data';
|
import { GrafanaTheme2 } from '@grafana/data';
|
||||||
import { Trans, t } from '@grafana/i18n';
|
import { Trans, t } from '@grafana/i18n';
|
||||||
|
import { config, getBackendSrv } from '@grafana/runtime';
|
||||||
import {
|
import {
|
||||||
Avatar,
|
Avatar,
|
||||||
CellProps,
|
CellProps,
|
||||||
|
@ -65,6 +66,7 @@ export const TeamList = ({
|
||||||
changeSort,
|
changeSort,
|
||||||
}: Props) => {
|
}: Props) => {
|
||||||
const [roleOptions, setRoleOptions] = useState<Role[]>([]);
|
const [roleOptions, setRoleOptions] = useState<Role[]>([]);
|
||||||
|
const [scimGroupSyncEnabled, setScimGroupSyncEnabled] = useState(false);
|
||||||
const styles = useStyles2(getStyles);
|
const styles = useStyles2(getStyles);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
|
@ -77,6 +79,25 @@ export const TeamList = ({
|
||||||
}
|
}
|
||||||
}, []);
|
}, []);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
const checkSCIMSettings = async () => {
|
||||||
|
if (!config.featureToggles.enableSCIM) {
|
||||||
|
setScimGroupSyncEnabled(false);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
const scimSettings = await getBackendSrv().get(
|
||||||
|
`/apis/scim.grafana.app/v0alpha1/namespaces/${config.namespace}/config`
|
||||||
|
);
|
||||||
|
setScimGroupSyncEnabled(scimSettings?.items[0]?.spec?.enableGroupSync || false);
|
||||||
|
} catch {
|
||||||
|
setScimGroupSyncEnabled(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
checkSCIMSettings();
|
||||||
|
}, []);
|
||||||
|
|
||||||
const canCreate = contextSrv.hasPermission(AccessControlAction.ActionTeamsCreate);
|
const canCreate = contextSrv.hasPermission(AccessControlAction.ActionTeamsCreate);
|
||||||
const displayRolePicker = shouldDisplayRolePicker();
|
const displayRolePicker = shouldDisplayRolePicker();
|
||||||
|
|
||||||
|
@ -198,7 +219,7 @@ export const TeamList = ({
|
||||||
const canReadTeam = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsRead, original);
|
const canReadTeam = contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsRead, original);
|
||||||
const canDelete =
|
const canDelete =
|
||||||
contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsDelete, original) &&
|
contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsDelete, original) &&
|
||||||
!original.isProvisioned;
|
(!scimGroupSyncEnabled || !original.isProvisioned);
|
||||||
return (
|
return (
|
||||||
<Stack direction="row" justifyContent="flex-end" gap={2}>
|
<Stack direction="row" justifyContent="flex-end" gap={2}>
|
||||||
{canReadTeam && (
|
{canReadTeam && (
|
||||||
|
@ -226,7 +247,7 @@ export const TeamList = ({
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
[displayRolePicker, hasFetched, rolesLoading, roleOptions, deleteTeam, styles]
|
[displayRolePicker, hasFetched, rolesLoading, roleOptions, deleteTeam, styles, scimGroupSyncEnabled]
|
||||||
);
|
);
|
||||||
|
|
||||||
return (
|
return (
|
||||||
|
|
|
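The canDelete change only locks provisioned teams while SCIM group sync is actually enabled; with sync off, provisioning alone no longer blocks deletion. A small sketch of just that predicate; hasDeletePermission stands in for the contextSrv.hasPermissionInMetadata call and TeamLike is a pared-down shape, both assumptions for illustration:

  interface TeamLike {
    isProvisioned?: boolean;
  }

  // Mirrors the diff: deletion needs the permission, and a provisioned team is
  // only protected while SCIM group sync is enabled on the instance.
  function canDeleteTeam(hasDeletePermission: boolean, scimGroupSyncEnabled: boolean, team: TeamLike): boolean {
    return hasDeletePermission && (!scimGroupSyncEnabled || !team.isProvisioned);
  }

  canDeleteTeam(true, false, { isProvisioned: true }); // true: sync off, provisioning alone does not block delete
  canDeleteTeam(true, true, { isProvisioned: true });  // false: team is managed by SCIM group sync
  canDeleteTeam(false, true, { isProvisioned: false }); // false: permission is still required
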
@@ -2,6 +2,7 @@
   "type": "datasource",
   "name": "PostgreSQL",
   "id": "grafana-postgresql-datasource",
+  "executable": "gpx_grafana-postgresql-datasource",
   "aliasIDs": ["postgres"],
   "category": "sql",
 
@@ -21,6 +22,9 @@
       { "name": "Documentation", "url": "https://grafana.com/docs/grafana/latest/datasources/postgres/" }
     ]
   },
+  "dependencies": {
+    "grafanaDependency": ">=11.6.0"
+  },
 
   "alerting": true,
   "annotations": true,

@@ -1029,9 +1029,15 @@ export class GraphiteDatasource
     };
 
     if (config.featureToggles.graphiteBackendMode) {
-      const functions = await this.getResource<string>('functions');
-      this.funcDefs = gfunc.parseFuncDefs(functions);
-      return this.funcDefs;
+      try {
+        const functions = await this.getResource<string>('functions');
+        this.funcDefs = gfunc.parseFuncDefs(functions);
+        return this.funcDefs;
+      } catch (error) {
+        console.error('Fetching graphite functions error', error);
+        this.funcDefs = gfunc.getFuncDefs(this.graphiteVersion);
+        return this.funcDefs;
+      }
     }
 
     return lastValueFrom(

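The wrapped call turns a hard failure of the 'functions' resource into a degraded-but-working editor: parse the server's definitions when possible, otherwise fall back to the bundled static ones. A sketch of the pattern with the datasource internals abstracted behind an interface; the interface and its method names are hypothetical, while the real code calls this.getResource, gfunc.parseFuncDefs, and gfunc.getFuncDefs directly:

  type FuncDefs = Record<string, unknown>;

  // Hypothetical seam over the real GraphiteDatasource/gfunc calls.
  interface FunctionDefsSource {
    fetchRemote(): Promise<string>;     // ~ this.getResource<string>('functions')
    parse(raw: string): FuncDefs;       // ~ gfunc.parseFuncDefs(raw)
    bundled(version: string): FuncDefs; // ~ gfunc.getFuncDefs(version)
  }

  // Prefer server-provided definitions; on any fetch/parse error, log it and
  // serve the static definitions so the query editor keeps autocompleting.
  async function loadFuncDefs(src: FunctionDefsSource, graphiteVersion: string): Promise<FuncDefs> {
    try {
      return src.parse(await src.fetchRemote());
    } catch (error) {
      console.error('Fetching graphite functions error', error);
      return src.bundled(graphiteVersion);
    }
  }
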
@@ -273,6 +273,15 @@ describe('interpolateQueryExpr', () => {
   replace: jest.fn().mockImplementation((...rest: unknown[]) => 'templateVarReplaced'),
 } as unknown as TemplateSrv;
 let ds = getMockInfluxDS(getMockDSInstanceSettings(), templateSrvStub);
+
+// Mock console.warn as we expect tests to use it
+beforeEach(() => {
+  jest.spyOn(console, 'warn').mockImplementation();
+});
+afterEach(() => {
+  jest.restoreAllMocks();
+});
+
 it('should return the value as it is', () => {
   const value = 'normalValue';
   const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build();
@@ -281,6 +290,18 @@ describe('interpolateQueryExpr', () => {
   expect(result).toBe(expectation);
 });
 
+it('should return the escaped value if the value wrapped in regex without !~ or =~', () => {
+  const value = '/special/path';
+  const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build();
+  const result = ds.interpolateQueryExpr(
+    value,
+    variableMock,
+    'select atan(z/sqrt(3.14)), that where path /$tempVar/'
+  );
+  const expectation = `\\/special\\/path`;
+  expect(result).toBe(expectation);
+});
+
 it('should return the escaped value if the value wrapped in regex', () => {
   const value = '/special/path';
   const variableMock = queryBuilder().withId('tempVar').withName('tempVar').withMulti(false).build();

@@ -360,10 +360,7 @@ export default class InfluxDatasource extends DataSourceWithBackend<InfluxQuery,
     // we want to see how it's been used. If it is used in a regex expression
     // we escape it. Otherwise, we return it directly.
     // The regex below searches for regexes within the query string
-    const regexMatcher = new RegExp(
-      /(\s*(=|!)~\s*)\/((?![*+?])(?:[^\r\n\[/\\]|\\.|\[(?:[^\r\n\]\\]|\\.)*\])+)\/((?:g(?:im?|mi?)?|i(?:gm?|mg?)?|m(?:gi?|ig?)?)?)/,
-      'gm'
-    );
+    const regexMatcher = new RegExp(/(?<=\/).+?(?=\/)/, 'gm');
     // If matches are found this regex is evaluated to check if the variable is contained in the regex /^...$/ (^ and $ is optional)
     // i.e. /^$myVar$/ or /$myVar/ or /^($myVar)$/
     const regex = new RegExp(`\\/(?:\\^)?(.*)(\\$${variable.name})(.*)(?:\\$)?\\/`, 'gm');
@@ -377,14 +374,22 @@ export default class InfluxDatasource extends DataSourceWithBackend<InfluxQuery,
     if (!queryMatches) {
       return value;
     }
+    // Use the variable specific regex against the query
+    if (!query.match(regex)) {
+      return value;
+    }
     for (const match of queryMatches) {
-      if (!match.match(regex)) {
-        continue;
-      }
+      // It is expected that the RegExp should be valid. As our regex matcher matches any text between two '/'
+      // we also validate that the expression compiles before assuming it is a regular expression.
+      try {
+        new RegExp(match);
 
       // If the value is a string array first escape them then join them with pipe
       // then put inside parenthesis.
       return typeof value === 'string' ? escapeRegex(value) : `(${value.map((v) => escapeRegex(v)).join('|')})`;
+      } catch (e) {
+        console.warn(`Supplied match is not valid regex: ${match}`);
+      }
     }
 
     return value;

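The new matcher (?<=\/).+?(?=\/) is deliberately loose: it captures any text between two slashes, which also picks up non-regex fragments such as the slice starting inside atan(z/sqrt(3.14)) in the test query above. The compile check is what separates real regex usage from those false positives. A standalone sketch of the two-step filter; the function name and the return shape are illustrative, since the real method returns an escaped value rather than the match list:

  // Any text between two '/' delimiters; the lookbehind needs ES2018+.
  const regexMatcher = /(?<=\/).+?(?=\/)/gm;

  function validRegexCandidates(query: string): string[] {
    const queryMatches = query.match(regexMatcher) ?? [];
    return queryMatches.filter((match) => {
      try {
        new RegExp(match); // throws SyntaxError on fragments like 'sqrt(3.14)), ...'
        return true;
      } catch {
        console.warn(`Supplied match is not valid regex: ${match}`);
        return false;
      }
    });
  }

  // The arithmetic fragment is rejected; only the /$tempVar/ body survives.
  validRegexCandidates('select atan(z/sqrt(3.14)), that where path /$tempVar/'); // ['$tempVar']
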
@@ -1,6 +1,5 @@
 import { css, cx } from '@emotion/css';
 import { useMemo } from 'react';
-import AutoSizer from 'react-virtualized-auto-sizer';
 
 import { GrafanaTheme2, StandardEditorProps } from '@grafana/data';
 import {
@@ -25,26 +24,17 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp
 
   return (
     <div className={cx(styles.editorBox)}>
-      <AutoSizer disableHeight>
-        {({ width }) => {
-          if (width === 0) {
-            return null;
-          }
-          return (
-            <CodeEditor
-              value={value}
-              onBlur={onChange}
-              onSave={onChange}
-              language={language}
-              width={width}
-              showMiniMap={false}
-              showLineNumbers={false}
-              height="500px"
-              getSuggestions={getSuggestions}
-            />
-          );
-        }}
-      </AutoSizer>
+      <CodeEditor
+        value={value}
+        onBlur={onChange}
+        onSave={onChange}
+        language={language}
+        width="100%"
+        showMiniMap={false}
+        showLineNumbers={false}
+        height="500px"
+        getSuggestions={getSuggestions}
+      />
     </div>
   );
 };
@@ -52,8 +42,6 @@ export const TextPanelEditor = ({ value, onChange, context }: StandardEditorProp
 const getStyles = (theme: GrafanaTheme2) => ({
   editorBox: css({
     label: 'editorBox',
-    border: `1px solid ${theme.colors.border.medium}`,
-    borderRadius: theme.shape.radius.default,
     margin: theme.spacing(0.5, 0),
     width: '100%',
   }),

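Dropping AutoSizer works because CodeEditor accepts CSS sizes: with the wrapper already at width: '100%', passing width="100%" to the editor makes the pixel measurement (and the zero-width render guard) unnecessary. A pared-down sketch of the resulting render; the inline style and the 'markdown' language value are assumptions for illustration, since the real editor derives language from panel options and keeps getSuggestions:

  import { CodeEditor } from '@grafana/ui';

  // Sizing is delegated to CSS instead of a measuring wrapper like AutoSizer.
  export const MiniTextEditor = ({ value, onChange }: { value: string; onChange: (v: string) => void }) => (
    <div style={{ width: '100%', margin: '4px 0' }}>
      <CodeEditor
        value={value}
        onBlur={onChange}
        onSave={onChange}
        language="markdown"
        width="100%"
        height="500px"
        showMiniMap={false}
        showLineNumbers={false}
      />
    </div>
  );
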
@@ -10228,7 +10228,7 @@
         "title": "Zdroje dat"
       },
       "databases": {
-        "title": "Databáze"
+        "title": ""
       },
       "datasources": {
         "subtitle": "Přidávejte a konfigurujte zdroje dat",
@@ -11772,6 +11772,8 @@
         "button-cancelling": "",
         "button-next": "Dokončit",
         "button-start": "Zahájit synchronizaci",
+        "check-status-button": "",
+        "check-status-message": "",
         "discard-modal": {
           "body": "Tímto odstraníte konfiguraci úložiště a přijdete o veškerý pokrok. Opravdu chcete zahodit změny?",
           "confirm": "Ano, zahodit",
@@ -10166,7 +10166,7 @@
         "title": "Datenquellen"
       },
       "databases": {
-        "title": "Datenbanken"
+        "title": ""
       },
       "datasources": {
         "subtitle": "Füge Datenquellen hinzu und konfiguriere sie",
@@ -11692,6 +11692,8 @@
         "button-cancelling": "",
         "button-next": "Fertigstellen",
         "button-start": "Synchronisierung starten",
+        "check-status-button": "",
+        "check-status-message": "",
         "discard-modal": {
           "body": "Dadurch wird die Repository-Konfiguration gelöscht und Sie verlieren alle Fortschritte. Sind Sie sicher, dass Sie Ihre Änderungen verwerfen möchten?",
           "confirm": "Ja, verwerfen",
@@ -3496,6 +3496,7 @@
       "move-modal-field-label": "Folder name",
       "move-modal-text": "This action will move the following content:",
       "move-modal-title": "Move",
+      "move-provisioned-folder": "Move provisioned folder",
       "moving": "Moving...",
       "new-folder-name-required-phrase": "Folder name is required.",
       "selected-mix-resources-modal-text": "You have selected both provisioned and non-provisioned resources. These cannot be processed together. Please select only provisioned resources or only non-provisioned resources and try again.",
@@ -10166,7 +10166,7 @@
         "title": "Orígenes de datos"
       },
       "databases": {
-        "title": "Bases de datos"
+        "title": ""
       },
       "datasources": {
         "subtitle": "Añadir y configurar orígenes de datos",
@@ -11692,6 +11692,8 @@
         "button-cancelling": "",
         "button-next": "Terminar",
         "button-start": "Iniciar la sincronización",
+        "check-status-button": "",
+        "check-status-message": "",
         "discard-modal": {
           "body": "Esto eliminará la configuración del repositorio y se perderá todo el progreso. ¿Seguro que quieres descartar los cambios?",
           "confirm": "Sí, descartar",
@@ -10166,7 +10166,7 @@
         "title": "Sources de données"
       },
       "databases": {
-        "title": "Bases de données"
+        "title": ""
      },
       "datasources": {
         "subtitle": "Ajouter et configurer des sources de données",
@@ -11692,6 +11692,8 @@
         "button-cancelling": "",
         "button-next": "Terminer",
         "button-start": "Commencer la synchronisation",
+        "check-status-button": "",
+        "check-status-message": "",
         "discard-modal": {
           "body": "Cette action supprimera la configuration du dépôt et vous perdrez toutes vos modifications. Voulez-vous vraiment annuler vos modifications ?",
           "confirm": "Oui, annuler",
@@ -10166,7 +10166,7 @@
         "title": "Adatforrások"
       },
       "databases": {
-        "title": "Adatbázisok"
+        "title": ""
       },
       "datasources": {
         "subtitle": "Adatforrások hozzáadása és konfigurálása",
@@ -11692,6 +11692,8 @@
         "button-cancelling": "",
         "button-next": "Befejezés",
         "button-start": "Szinkronizálás indítása",
+        "check-status-button": "",
+        "check-status-message": "",
         "discard-modal": {
           "body": "Ez törli az adattár konfigurációját, és az összes előrehaladás el fog veszni. Biztosan elveti a módosításokat?",
           "confirm": "Igen, elvetés",

Some files were not shown because too many files have changed in this diff.