mirror of https://github.com/grafana/grafana.git
Merge branch 'main' into undef1nd/ofrep-frontend-settings
This commit is contained in:
commit
f4f4b92192
|
@ -88,6 +88,7 @@
|
|||
/apps/preferences/ @grafana/grafana-app-platform-squad @grafana/grafana-frontend-platform
|
||||
/apps/shorturl/ @grafana/sharing-squad
|
||||
/apps/secret/ @grafana/grafana-operator-experience-squad
|
||||
/apps/scope/ @grafana/grafana-operator-experience-squad
|
||||
/apps/investigations/ @fcjack @matryer @svennergr
|
||||
/apps/advisor/ @grafana/plugins-platform-backend
|
||||
/apps/iam/ @grafana/access-squad
|
||||
|
@ -629,6 +630,7 @@
|
|||
/packages/grafana-runtime/rollup.config.ts @grafana/grafana-frontend-platform
|
||||
/packages/grafana-runtime/src/index.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend
|
||||
/packages/grafana-runtime/src/internal/index.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend
|
||||
/packages/grafana-runtime/src/internal/openFeature @grafana/grafana-frontend-platform
|
||||
/packages/grafana-runtime/src/unstable.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend
|
||||
/packages/grafana-runtime/tsconfig.build.json @grafana/grafana-frontend-platform
|
||||
/packages/grafana-runtime/tsconfig.json @grafana/grafana-frontend-platform
|
||||
|
|
|
@ -99,6 +99,7 @@ COPY apps/correlations apps/correlations
|
|||
COPY apps/preferences apps/preferences
|
||||
COPY apps/provisioning apps/provisioning
|
||||
COPY apps/secret apps/secret
|
||||
COPY apps/scope apps/scope
|
||||
COPY apps/investigations apps/investigations
|
||||
COPY apps/advisor apps/advisor
|
||||
COPY apps/dashboard apps/dashboard
|
||||
|
|
|
@ -5,6 +5,7 @@ go 1.24.6
|
|||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.46.0
|
||||
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250804150913-990f1c69ecc2
|
||||
github.com/stretchr/testify v1.11.1
|
||||
k8s.io/apimachinery v0.34.1
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
)
|
||||
|
@ -42,7 +43,6 @@ require (
|
|||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/stretchr/testify v1.11.1 // indirect
|
||||
github.com/woodsbury/decimal128 v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
|
|
|
@ -0,0 +1,80 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (stars *StarsSpec) Add(group, kind, name string) {
|
||||
for i, r := range stars.Resource {
|
||||
if r.Group == group && r.Kind == kind {
|
||||
r.Names = append(r.Names, name)
|
||||
slices.Sort(r.Names)
|
||||
stars.Resource[i].Names = slices.Compact(r.Names)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Add the resource kind
|
||||
stars.Resource = append(stars.Resource, StarsResource{
|
||||
Group: group,
|
||||
Kind: kind,
|
||||
Names: []string{name},
|
||||
})
|
||||
stars.Normalize()
|
||||
}
|
||||
|
||||
func (stars *StarsSpec) Remove(group, kind, name string) {
|
||||
for i, r := range stars.Resource {
|
||||
if r.Group == group && r.Kind == kind {
|
||||
idx := slices.Index(r.Names, name)
|
||||
if idx < 0 {
|
||||
return // does not exist
|
||||
}
|
||||
r.Names = append(r.Names[:idx], r.Names[idx+1:]...)
|
||||
stars.Resource[i].Names = r.Names
|
||||
if len(r.Names) == 0 {
|
||||
stars.Normalize()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Makes sure everything is in sorted order
|
||||
func (stars *StarsSpec) Normalize() {
|
||||
resources := make([]StarsResource, 0, len(stars.Resource))
|
||||
for _, r := range stars.Resource {
|
||||
if len(r.Names) > 0 {
|
||||
slices.Sort(r.Names)
|
||||
r.Names = slices.Compact(r.Names) // removes any duplicates
|
||||
resources = append(resources, r)
|
||||
}
|
||||
}
|
||||
slices.SortFunc(resources, func(a StarsResource, b StarsResource) int {
|
||||
return strings.Compare(a.Group+a.Kind, b.Group+b.Kind)
|
||||
})
|
||||
if len(resources) == 0 {
|
||||
resources = nil
|
||||
}
|
||||
stars.Resource = resources
|
||||
}
|
||||
|
||||
// Changes partitions target relative to current: added holds values present
// only in target, removed holds values present only in current, and same
// holds values present in both. Duplicates in current are collapsed (as
// before, via the lookup map); each result preserves input order. All
// results are nil when empty.
func Changes(current []string, target []string) (added []string, removed []string, same []string) {
	lookup := make(map[string]bool, len(current))
	for _, k := range current {
		lookup[k] = true
	}
	for _, k := range target {
		if lookup[k] {
			same = append(same, k)
			delete(lookup, k) // consumed; whatever remains is the removed set
		} else {
			added = append(added, k)
		}
	}
	// Iterate current (not the map) so removed has a deterministic order;
	// ranging over the map would yield a different order on every call.
	for _, k := range current {
		if lookup[k] {
			removed = append(removed, k)
			delete(lookup, k) // collapse duplicates in current, as before
		}
	}
	return
}
|
|
@ -0,0 +1,235 @@
|
|||
package v1alpha1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// starItem identifies a single starred resource name in the table-driven
// tests below.
type starItem struct {
	group string // API group of the starred resource
	kind  string // kind within that group
	name  string // resource name being starred/unstarred
}
|
||||
|
||||
func TestStarsWrite(t *testing.T) {
|
||||
t.Run("apply", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
spec *StarsSpec
|
||||
item starItem
|
||||
remove bool
|
||||
expect *StarsSpec
|
||||
}{{
|
||||
name: "add to an existing array",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "x"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "c",
|
||||
},
|
||||
remove: false,
|
||||
expect: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c", "x"}, // added "b" (and sorted)
|
||||
}},
|
||||
},
|
||||
}, {
|
||||
name: "remove from an existing array",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "b",
|
||||
},
|
||||
remove: true,
|
||||
expect: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "c"}, // removed "b"
|
||||
}},
|
||||
},
|
||||
}, {
|
||||
name: "add to empty spec",
|
||||
spec: &StarsSpec{},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "a",
|
||||
},
|
||||
remove: false,
|
||||
expect: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a"},
|
||||
}},
|
||||
},
|
||||
}, {
|
||||
name: "remove item that does not exist",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"x"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "a",
|
||||
},
|
||||
remove: true,
|
||||
}, {
|
||||
name: "add item that already exist",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"x"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "x",
|
||||
},
|
||||
remove: false,
|
||||
}, {
|
||||
name: "remove from empty",
|
||||
spec: &StarsSpec{},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "a",
|
||||
},
|
||||
remove: true,
|
||||
}, {
|
||||
name: "remove item that does not exist",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "X",
|
||||
},
|
||||
remove: true,
|
||||
}, {
|
||||
name: "remove last item",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "a",
|
||||
},
|
||||
remove: true,
|
||||
expect: &StarsSpec{}, // empty object
|
||||
}, {
|
||||
name: "remove last item (with others)",
|
||||
spec: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a"},
|
||||
}, {
|
||||
Group: "g2",
|
||||
Kind: "k2",
|
||||
Names: []string{"a"},
|
||||
}}},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
name: "a",
|
||||
},
|
||||
remove: true,
|
||||
expect: &StarsSpec{
|
||||
Resource: []StarsResource{{
|
||||
Group: "g2",
|
||||
Kind: "k2",
|
||||
Names: []string{"a"},
|
||||
}}},
|
||||
}}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.expect == nil {
|
||||
tt.expect = tt.spec.DeepCopy()
|
||||
}
|
||||
|
||||
if tt.remove {
|
||||
tt.spec.Remove(tt.item.group, tt.item.kind, tt.item.name)
|
||||
} else {
|
||||
tt.spec.Add(tt.item.group, tt.item.kind, tt.item.name)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expect, tt.spec)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("changes", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
current []string
|
||||
target []string
|
||||
added []string
|
||||
removed []string
|
||||
same []string
|
||||
}{{
|
||||
name: "same",
|
||||
current: []string{"a"},
|
||||
target: []string{"a"},
|
||||
same: []string{"a"},
|
||||
}, {
|
||||
name: "adding one",
|
||||
current: []string{"a"},
|
||||
target: []string{"a", "b"},
|
||||
same: []string{"a"},
|
||||
added: []string{"b"},
|
||||
}, {
|
||||
name: "removing one",
|
||||
current: []string{"a", "b"},
|
||||
target: []string{"a"},
|
||||
same: []string{"a"},
|
||||
removed: []string{"b"},
|
||||
}, {
|
||||
name: "removed to empty",
|
||||
current: []string{"a"},
|
||||
target: []string{},
|
||||
removed: []string{"a"},
|
||||
}}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
a, r, s := Changes(tt.current, tt.target)
|
||||
require.Equal(t, tt.added, a, "added")
|
||||
require.Equal(t, tt.removed, r, "removed")
|
||||
require.Equal(t, tt.same, s, "same")
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
module github.com/grafana/grafana/apps/scope
|
||||
|
||||
go 1.24.6
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20251007081214-26e147d01f0a
|
||||
k8s.io/apimachinery v0.34.1
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/stretchr/testify v1.11.1 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/net v0.44.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
google.golang.org/protobuf v1.36.9 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
|
@ -0,0 +1,118 @@
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
|
||||
github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20251007081214-26e147d01f0a h1:L7xgV9mP6MRF3L2/vDOjNR7heaBPbXPMGTDN9/jXSFQ=
|
||||
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20251007081214-26e147d01f0a/go.mod h1:OK8NwS87D5YphchOcAsiIWk/feMZ0EzfAGME1Kff860=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
|
||||
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
|
||||
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
|
@ -0,0 +1,6 @@
|
|||
// Code-generation markers for the k8s generators (deepcopy, openapi, defaulter).
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
// +groupName=scope.grafana.app

// NOTE(review): the enforced import path below contains "apps/pkg/apis/scope",
// which does not obviously match a module rooted at apps/scope — confirm it.
package v0alpha1 // import "github.com/grafana/grafana/apps/pkg/apis/scope/v0alpha1"
|
|
@ -0,0 +1,168 @@
|
|||
package v0alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// API group/version identifiers for the scope kinds served by this package.
const (
	GROUP      = "scope.grafana.app"
	VERSION    = "v0alpha1"
	APIVERSION = GROUP + "/" + VERSION
)
|
||||
|
||||
// ScopeResourceInfo describes the Scope kind: its resource/singular/kind
// names, object constructors, and the columns used when rendering a table
// (kubectl-get style) view.
var ScopeResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopes", "scope", "Scope",
	func() runtime.Object { return &Scope{} },
	func() runtime.Object { return &ScopeList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "Title", Type: "string"},
			{Name: "Filters", Type: "array"},
		},
		// Reader maps one object to one table row, in column order.
		Reader: func(obj any) ([]interface{}, error) {
			m, ok := obj.(*Scope)
			if !ok {
				return nil, fmt.Errorf("expected scope")
			}
			return []interface{}{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.Title,
				m.Spec.Filters,
			}, nil
		},
	}, // default table converter
)
|
||||
|
||||
// ScopeDashboardBindingResourceInfo describes the ScopeDashboardBinding kind,
// including the table columns (Dashboard and Scope come from the spec).
var ScopeDashboardBindingResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopedashboardbindings", "scopedashboardbinding", "ScopeDashboardBinding",
	func() runtime.Object { return &ScopeDashboardBinding{} },
	func() runtime.Object { return &ScopeDashboardBindingList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "Dashboard", Type: "string"},
			{Name: "Scope", Type: "string"},
		},
		// Reader maps one object to one table row, in column order.
		Reader: func(obj any) ([]interface{}, error) {
			m, ok := obj.(*ScopeDashboardBinding)
			if !ok {
				return nil, fmt.Errorf("expected scope dashboard binding")
			}
			return []interface{}{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.Dashboard,
				m.Spec.Scope,
			}, nil
		},
	},
)
|
||||
|
||||
// ScopeNavigationResourceInfo describes the ScopeNavigation kind, including
// the table columns (URL and Scope come from the spec).
var ScopeNavigationResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopenavigations", "scopenavigation", "ScopeNavigation",
	func() runtime.Object { return &ScopeNavigation{} },
	func() runtime.Object { return &ScopeNavigationList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "URL", Type: "string"},
			{Name: "Scope", Type: "string"},
		},
		// Reader maps one object to one table row, in column order.
		Reader: func(obj any) ([]interface{}, error) {
			m, ok := obj.(*ScopeNavigation)
			if !ok {
				return nil, fmt.Errorf("expected scope navigation")
			}
			return []interface{}{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.URL,
				m.Spec.Scope,
			}, nil
		},
	},
)
|
||||
|
||||
// ScopeNodeResourceInfo describes the ScopeNode kind, including the table
// columns drawn from ScopeNodeSpec (title, parent, node/link type, link id).
var ScopeNodeResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopenodes", "scopenode", "ScopeNode",
	func() runtime.Object { return &ScopeNode{} },
	func() runtime.Object { return &ScopeNodeList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "Title", Type: "string"},
			{Name: "Parent Name", Type: "string"},
			{Name: "Node Type", Type: "string"},
			{Name: "Link Type", Type: "string"},
			{Name: "Link ID", Type: "string"},
		},
		// Reader maps one object to one table row, in column order.
		Reader: func(obj any) ([]interface{}, error) {
			m, ok := obj.(*ScopeNode)
			if !ok {
				return nil, fmt.Errorf("expected scope node")
			}
			return []interface{}{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.Title,
				m.Spec.ParentName,
				m.Spec.NodeType,
				m.Spec.LinkType,
				m.Spec.LinkID,
			}, nil
		},
	}, // default table converter
)
|
||||
|
||||
var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: GROUP, Version: VERSION}
	// InternalGroupVersion is the unversioned (internal) form of the group.
	InternalGroupVersion = schema.GroupVersion{Group: GROUP, Version: runtime.APIVersionInternal}

	// SchemeBuilder is used by standard codegen
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder and AddToScheme are the conventional hooks that
	// generated code and consumers use to install these types into a scheme.
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)
|
||||
|
||||
// init wires AddKnownTypes into the scheme builder so that any scheme built
// via AddToScheme registers this package's types under SchemeGroupVersion.
func init() {
	localSchemeBuilder.Register(func(s *runtime.Scheme) error {
		return AddKnownTypes(SchemeGroupVersion, s)
	})
}
|
||||
|
||||
// AddKnownTypes adds the list of known types to the given scheme under the
// given group/version. It always returns nil.
func AddKnownTypes(gv schema.GroupVersion, scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(gv,
		&Scope{},
		&ScopeList{},
		&ScopeDashboardBinding{},
		&ScopeDashboardBindingList{},
		&ScopeNode{},
		&ScopeNodeList{},
		&FindScopeNodeChildrenResults{},
		&FindScopeDashboardBindingsResults{},
		&ScopeNavigation{},
		&ScopeNavigationList{},
		&FindScopeNavigationsResults{},
	)
	// NOTE(review): metav1.AddToGroupVersion is commented out below; register.go
	// files usually call it to register ListOptions etc. — confirm this is intended.
	//metav1.AddToGroupVersion(scheme, gv)
	return nil
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
|
@ -0,0 +1,238 @@
|
|||
package v0alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
/*
|
||||
Please keep pkg/promlib/models/query.go and pkg/promlib/models/scope.go in sync
|
||||
with this file until this package is out of the grafana/grafana module.
|
||||
*/
|
||||
|
||||
// Scope is the top-level API object for the Scope kind; its behavior is
// defined entirely by Spec.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Scope struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ScopeSpec `json:"spec,omitempty"`
}
|
||||
|
||||
// ScopeSpec defines a scope: its title, an optional default selector path,
// and the filters that make up the scope.
type ScopeSpec struct {
	// Title is the scope's title (shown in table output as "Title").
	Title string `json:"title"`
	// Provides a default path for the scope. This refers to a list of nodes in the selector. This is used to display the title next to the selected scope and expand the selector to the proper path.
	// This will override whichever is selected from in the selector.
	// The path is a list of node ids, starting at the direct parent of the selected node towards the root.
	// +listType=atomic
	DefaultPath []string `json:"defaultPath,omitempty"`

	// Filters are the filter expressions that this scope applies.
	// +listType=atomic
	Filters []ScopeFilter `json:"filters,omitempty"`
}
|
||||
|
||||
// ScopeFilter is a single filter expression: a key compared to a value (or
// a set of values) using the given operator.
type ScopeFilter struct {
	Key string `json:"key"`
	// Value is used for single-value operators (e.g. equals, regex-match).
	Value string `json:"value"`
	// Values is used for operators that require multiple values (e.g. one-of and not-one-of).
	// +listType=atomic
	Values   []string       `json:"values,omitempty"`
	Operator FilterOperator `json:"operator"`
}
|
||||
|
||||
// Type of the filter operator.
// +enum
type FilterOperator string

// Defines values for FilterOperator.
const (
	FilterOperatorEquals        FilterOperator = "equals"
	FilterOperatorNotEquals     FilterOperator = "not-equals"
	FilterOperatorRegexMatch    FilterOperator = "regex-match"
	FilterOperatorRegexNotMatch FilterOperator = "regex-not-match"
	FilterOperatorOneOf         FilterOperator = "one-of"
	FilterOperatorNotOneOf      FilterOperator = "not-one-of"
)
|
||||
|
||||
// ScopeList is the standard list resource for Scope objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Scope `json:"items,omitempty"`
}
|
||||
|
||||
// ScopeDashboardBinding associates a dashboard with a scope (see
// ScopeDashboardBindingSpec); Status carries derived display information.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeDashboardBinding struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ScopeDashboardBindingSpec   `json:"spec,omitempty"`
	Status ScopeDashboardBindingStatus `json:"status,omitempty"`
}
|
||||
|
||||
// ScopeDashboardBindingList is the standard list resource for
// ScopeDashboardBinding objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeDashboardBindingList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeDashboardBinding `json:"items,omitempty"`
}
|
||||
|
||||
// FindScopeDashboardBindingsResults is the response for a "find" query over
// dashboard bindings: the matching items plus an optional message. Note that,
// unlike the List type, it carries no ListMeta.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FindScopeDashboardBindingsResults struct {
	metav1.TypeMeta `json:",inline"`

	Items   []ScopeDashboardBinding `json:"items,omitempty"`
	Message string                  `json:"message,omitempty"`
}
|
||||
|
||||
// ScopeNode is one node of the scope selector tree; see ScopeNodeSpec.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNode struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ScopeNodeSpec `json:"spec,omitempty"`
}
|
||||
|
||||
// ScopeDashboardBindingSpec names the dashboard and the scope being bound.
type ScopeDashboardBindingSpec struct {
	Dashboard string `json:"dashboard"`
	Scope     string `json:"scope"`
}
|
||||
|
||||
// ScopeDashboardBindingStatus contains derived information about a ScopeDashboardBinding.
// (Removed stale "// Type of the item." / "// +enum" lines that were copied
// from an enum declaration: this is a struct, not an enum, so the marker was
// incorrect for code generation.)
type ScopeDashboardBindingStatus struct {
	// DashboardTitle should be populated and updated from the dashboard.
	DashboardTitle string `json:"dashboardTitle"`

	// Groups is used for the grouping of dashboards that are suggested based
	// on a scope. The source of truth for this information has not been
	// determined yet.
	Groups []string `json:"groups,omitempty"`

	// DashboardTitleConditions is a list of conditions that are used to determine if the dashboard title is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	DashboardTitleConditions []metav1.Condition `json:"dashboardTitleConditions,omitempty"`

	// GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.
	// (Was previously documented under the wrong name "DashboardTitleConditions".)
	// +optional
	// +listType=map
	// +listMapKey=type
	GroupsConditions []metav1.Condition `json:"groupsConditions,omitempty"`
}
|
||||
|
||||
// NodeType is the type of a ScopeNode: container | leaf.
type NodeType string

// Defines values for NodeType.
const (
	NodeTypeContainer NodeType = "container"
	NodeTypeLeaf      NodeType = "leaf"
)
|
||||
|
||||
// LinkType is the kind of object a ScopeNode's link points at
// (currently only "scope"; more may be added later).
// +enum
type LinkType string

// Defines values for LinkType.
const (
	LinkTypeScope LinkType = "scope"
)
|
||||
|
||||
// ScopeNodeSpec describes one node of the scope selector tree: its parent,
// whether it is a container or a selectable leaf, display fields, and the
// object its link points at.
type ScopeNodeSpec struct {
	// +optional
	ParentName string `json:"parentName,omitempty"`

	NodeType NodeType `json:"nodeType"` // container | leaf

	Title              string `json:"title"`
	Description        string `json:"description,omitempty"`
	DisableMultiSelect bool   `json:"disableMultiSelect"`

	LinkType LinkType `json:"linkType,omitempty"` // scope (later more things)
	LinkID   string   `json:"linkId,omitempty"`   // the k8s name
	// ?? should this be a slice of links
}
|
||||
|
||||
// ScopeNodeList is the standard list resource for ScopeNode objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNodeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeNode `json:"items,omitempty"`
}
|
||||
|
||||
// FindScopeNodeChildrenResults is the response for querying the children of
// a scope node.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FindScopeNodeChildrenResults struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeNode `json:"items,omitempty"`
}
|
||||
|
||||
// Scoped navigation types

// ScopeNavigation associates a URL with a scope (see ScopeNavigationSpec);
// Status carries derived display information.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNavigation struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ScopeNavigationSpec   `json:"spec,omitempty"`
	Status ScopeNavigationStatus `json:"status,omitempty"`
}
|
||||
|
||||
// ScopeNavigationList is the standard list resource for ScopeNavigation objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNavigationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeNavigation `json:"items,omitempty"`
}
|
||||
|
||||
// FindScopeNavigationsResults is the response for a "find" query over
// navigations: the matching items plus an optional message. Note that,
// unlike the List type, it carries no ListMeta.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FindScopeNavigationsResults struct {
	metav1.TypeMeta `json:",inline"`

	Items   []ScopeNavigation `json:"items,omitempty"`
	Message string            `json:"message,omitempty"`
}
|
||||
|
||||
// ScopeNavigationSpec names the URL and the scope being associated.
type ScopeNavigationSpec struct {
	URL   string `json:"url"`
	Scope string `json:"scope"`
}
|
||||
|
||||
// ScopeNavigationStatus contains derived information about a ScopeNavigation.
// (Removed stale "// Type of the item." / "// +enum" lines that were copied
// from an enum declaration: this is a struct, not an enum, so the marker was
// incorrect for code generation.)
type ScopeNavigationStatus struct {
	// Title should be populated and updated from the dashboard.
	// NOTE(review): this comment appears copied from
	// ScopeDashboardBindingStatus — confirm the actual source of Title for
	// navigations.
	Title string `json:"title"`

	// Groups is used for the grouping of dashboards that are suggested based
	// on a scope. The source of truth for this information has not been
	// determined yet.
	Groups []string `json:"groups,omitempty"`

	// TitleConditions is a list of conditions that are used to determine if the title is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	TitleConditions []metav1.Condition `json:"titleConditions,omitempty"`

	// GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	GroupsConditions []metav1.Condition `json:"groupsConditions,omitempty"`
}
|
||||
|
||||
// ScopeNavigationLinkType is the type of destination a ScopeNavigation
// links to (currently only "url").
// (Doc comments previously referred to FilterOperator — copy-paste leftover.)
// +enum
type ScopeNavigationLinkType string

// Defines values for ScopeNavigationLinkType.
const (
	ScopeNavigationLinkTypeURL ScopeNavigationLinkType = "url"
)
|
|
@ -0,0 +1,519 @@
|
|||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FindScopeDashboardBindingsResults) DeepCopyInto(out *FindScopeDashboardBindingsResults) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ScopeDashboardBinding, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindScopeDashboardBindingsResults.
|
||||
func (in *FindScopeDashboardBindingsResults) DeepCopy() *FindScopeDashboardBindingsResults {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FindScopeDashboardBindingsResults)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FindScopeDashboardBindingsResults) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FindScopeNavigationsResults) DeepCopyInto(out *FindScopeNavigationsResults) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ScopeNavigation, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindScopeNavigationsResults.
|
||||
func (in *FindScopeNavigationsResults) DeepCopy() *FindScopeNavigationsResults {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FindScopeNavigationsResults)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FindScopeNavigationsResults) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FindScopeNodeChildrenResults) DeepCopyInto(out *FindScopeNodeChildrenResults) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ScopeNode, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindScopeNodeChildrenResults.
|
||||
func (in *FindScopeNodeChildrenResults) DeepCopy() *FindScopeNodeChildrenResults {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FindScopeNodeChildrenResults)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *FindScopeNodeChildrenResults) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Scope) DeepCopyInto(out *Scope) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scope.
|
||||
func (in *Scope) DeepCopy() *Scope {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Scope)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Scope) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeDashboardBinding) DeepCopyInto(out *ScopeDashboardBinding) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBinding.
|
||||
func (in *ScopeDashboardBinding) DeepCopy() *ScopeDashboardBinding {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeDashboardBinding)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeDashboardBinding) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeDashboardBindingList) DeepCopyInto(out *ScopeDashboardBindingList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ScopeDashboardBinding, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingList.
|
||||
func (in *ScopeDashboardBindingList) DeepCopy() *ScopeDashboardBindingList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeDashboardBindingList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeDashboardBindingList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeDashboardBindingSpec) DeepCopyInto(out *ScopeDashboardBindingSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingSpec.
|
||||
func (in *ScopeDashboardBindingSpec) DeepCopy() *ScopeDashboardBindingSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeDashboardBindingSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeDashboardBindingStatus) DeepCopyInto(out *ScopeDashboardBindingStatus) {
|
||||
*out = *in
|
||||
if in.Groups != nil {
|
||||
in, out := &in.Groups, &out.Groups
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.DashboardTitleConditions != nil {
|
||||
in, out := &in.DashboardTitleConditions, &out.DashboardTitleConditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GroupsConditions != nil {
|
||||
in, out := &in.GroupsConditions, &out.GroupsConditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingStatus.
|
||||
func (in *ScopeDashboardBindingStatus) DeepCopy() *ScopeDashboardBindingStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeDashboardBindingStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeFilter) DeepCopyInto(out *ScopeFilter) {
|
||||
*out = *in
|
||||
if in.Values != nil {
|
||||
in, out := &in.Values, &out.Values
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeFilter.
|
||||
func (in *ScopeFilter) DeepCopy() *ScopeFilter {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeFilter)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeList) DeepCopyInto(out *ScopeList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Scope, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeList.
|
||||
func (in *ScopeList) DeepCopy() *ScopeList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNavigation) DeepCopyInto(out *ScopeNavigation) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigation.
|
||||
func (in *ScopeNavigation) DeepCopy() *ScopeNavigation {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNavigation)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeNavigation) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNavigationList) DeepCopyInto(out *ScopeNavigationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ScopeNavigation, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigationList.
|
||||
func (in *ScopeNavigationList) DeepCopy() *ScopeNavigationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNavigationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeNavigationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNavigationSpec) DeepCopyInto(out *ScopeNavigationSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigationSpec.
|
||||
func (in *ScopeNavigationSpec) DeepCopy() *ScopeNavigationSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNavigationSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNavigationStatus) DeepCopyInto(out *ScopeNavigationStatus) {
|
||||
*out = *in
|
||||
if in.Groups != nil {
|
||||
in, out := &in.Groups, &out.Groups
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.TitleConditions != nil {
|
||||
in, out := &in.TitleConditions, &out.TitleConditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GroupsConditions != nil {
|
||||
in, out := &in.GroupsConditions, &out.GroupsConditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigationStatus.
|
||||
func (in *ScopeNavigationStatus) DeepCopy() *ScopeNavigationStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNavigationStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNode) DeepCopyInto(out *ScopeNode) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNode.
|
||||
func (in *ScopeNode) DeepCopy() *ScopeNode {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNode)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeNode) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNodeList) DeepCopyInto(out *ScopeNodeList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ScopeNode, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNodeList.
|
||||
func (in *ScopeNodeList) DeepCopy() *ScopeNodeList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNodeList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScopeNodeList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeNodeSpec) DeepCopyInto(out *ScopeNodeSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNodeSpec.
|
||||
func (in *ScopeNodeSpec) DeepCopy() *ScopeNodeSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeNodeSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScopeSpec) DeepCopyInto(out *ScopeSpec) {
|
||||
*out = *in
|
||||
if in.DefaultPath != nil {
|
||||
in, out := &in.DefaultPath, &out.DefaultPath
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Filters != nil {
|
||||
in, out := &in.Filters, &out.Filters
|
||||
*out = make([]ScopeFilter, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSpec.
|
||||
func (in *ScopeSpec) DeepCopy() *ScopeSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScopeSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulters functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
// All generated defaulters are covering - they call all nested defaulters.
|
||||
func RegisterDefaults(scheme *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,934 @@
|
|||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
// Code generated by openapi-gen. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
import (
|
||||
common "k8s.io/kube-openapi/pkg/common"
|
||||
spec "k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
|
||||
return map[string]common.OpenAPIDefinition{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.FindScopeDashboardBindingsResults": schema_pkg_apis_scope_v0alpha1_FindScopeDashboardBindingsResults(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.FindScopeNavigationsResults": schema_pkg_apis_scope_v0alpha1_FindScopeNavigationsResults(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.FindScopeNodeChildrenResults": schema_pkg_apis_scope_v0alpha1_FindScopeNodeChildrenResults(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.Scope": schema_pkg_apis_scope_v0alpha1_Scope(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingList": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingList(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus": schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeFilter": schema_pkg_apis_scope_v0alpha1_ScopeFilter(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeList": schema_pkg_apis_scope_v0alpha1_ScopeList(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation": schema_pkg_apis_scope_v0alpha1_ScopeNavigation(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationList": schema_pkg_apis_scope_v0alpha1_ScopeNavigationList(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationSpec": schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationStatus": schema_pkg_apis_scope_v0alpha1_ScopeNavigationStatus(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode": schema_pkg_apis_scope_v0alpha1_ScopeNode(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeList": schema_pkg_apis_scope_v0alpha1_ScopeNodeList(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeSpec": schema_pkg_apis_scope_v0alpha1_ScopeNodeSpec(ref),
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeSpec": schema_pkg_apis_scope_v0alpha1_ScopeSpec(ref),
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_FindScopeDashboardBindingsResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"message": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_FindScopeNavigationsResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"message": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_FindScopeNodeChildrenResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_Scope(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"spec": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeSpec"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"spec": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec"),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec", "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"dashboard": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"scope": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"dashboard", "scope"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Type of the item. ScopeDashboardBindingStatus contains derived information about a ScopeDashboardBinding.",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"dashboardTitle": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "DashboardTitle should be populated and update from the dashboard",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"groups": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Groups is used for the grouping of dashboards that are suggested based on a scope. The source of truth for this information has not been determined yet.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"dashboardTitleConditions": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-map-keys": []interface{}{
|
||||
"type",
|
||||
},
|
||||
"x-kubernetes-list-type": "map",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "DashboardTitleConditions is a list of conditions that are used to determine if the dashboard title is valid.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"groupsConditions": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-map-keys": []interface{}{
|
||||
"type",
|
||||
},
|
||||
"x-kubernetes-list-type": "map",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "DashboardTitleConditions is a list of conditions that are used to determine if the list of groups is valid.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"dashboardTitle"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.Condition"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeFilter(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"key": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"value": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"values": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-type": "atomic",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Values is used for operators that require multiple values (e.g. one-of and not-one-of).",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"operator": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Possible enum values:\n - `\"equals\"`\n - `\"not-equals\"`\n - `\"not-one-of\"`\n - `\"one-of\"`\n - `\"regex-match\"`\n - `\"regex-not-match\"`",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
Enum: []interface{}{"equals", "not-equals", "not-one-of", "one-of", "regex-match", "regex-not-match"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"key", "value", "operator"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.Scope"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.Scope", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNavigation(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"spec": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationSpec"),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationStatus"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationSpec", "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNavigationList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"url": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"scope": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"url", "scope"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNavigationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Type of the item. ScopeNavigationStatus contains derived information about a ScopeNavigation.",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Title should be populated and update from the dashboard",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"groups": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Groups is used for the grouping of dashboards that are suggested based on a scope. The source of truth for this information has not been determined yet.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"titleConditions": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-map-keys": []interface{}{
|
||||
"type",
|
||||
},
|
||||
"x-kubernetes-list-type": "map",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "TitleConditions is a list of conditions that are used to determine if the title is valid.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"groupsConditions": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-map-keys": []interface{}{
|
||||
"type",
|
||||
},
|
||||
"x-kubernetes-list-type": "map",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"title"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.Condition"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNode(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"spec": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeSpec"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNodeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeNodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"parentName": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"nodeType": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"description": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"disableMultiSelect": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: false,
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"linkType": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Possible enum values:\n - `\"scope\"`",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
Enum: []interface{}{"scope"},
|
||||
},
|
||||
},
|
||||
"linkId": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "scope (later more things)",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"nodeType", "title", "disableMultiSelect"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_scope_v0alpha1_ScopeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"defaultPath": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-type": "atomic",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Provides a default path for the scope. This refers to a list of nodes in the selector. This is used to display the title next to the selected scope and expand the selector to the proper path. This will override whichever is selected from in the selector. The path is a list of node ids, starting at the direct parent of the selected node towards the root.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"filters": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-type": "atomic",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeFilter"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"title"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeFilter"},
|
||||
}
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,FindScopeDashboardBindingsResults,Items
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,FindScopeNavigationsResults,Items
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeDashboardBindingStatus,Groups
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNavigationStatus,Groups
|
||||
API rule violation: names_match,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNodeSpec,LinkID
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,FindScopeNodeChildrenResults,Items
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeDashboardBindingList,Items
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeList,Items
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNavigationList,Items
|
||||
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNodeList,Items
|
|
@ -14,7 +14,30 @@ Once your feature toggle is defined, you can then wrap your feature around a che
|
|||
Examples:
|
||||
|
||||
- [Backend](https://github.com/grafana/grafana/blob/feb2b5878b3e3ec551d64872c35edec2a0187812/pkg/services/authn/clients/session.go#L57): Use the `IsEnabled` function and pass in your feature toggle.
|
||||
- [Frontend](https://github.com/grafana/grafana/blob/feb2b5878b3e3ec551d64872c35edec2a0187812/public/app/features/search/service/folders.ts#L14): Check the config for your feature toggle.
|
||||
|
||||
### Frontend
|
||||
|
||||
Use the new OpenFeature-based feature flag client for all new feature flags. There are some differences compared to the legacy `config.featureToggles` system:
|
||||
|
||||
- Feature flag initialisation is async, but will be finished by the time the UI is rendered. This means you cannot get the value of a feature flag at the 'top level' of a module/file
|
||||
- Call `evaluateBooleanFlag("flagName")` from `@grafana/runtime/internal` instead to get the value of a feature flag
|
||||
- Feature flag values _may_ change over the lifetime of the session. Do not store the value in a variable that is used for longer than a single render - always call `evaluateBooleanFlag` lazily when you use the value.
|
||||
|
||||
e.g.
|
||||
|
||||
```ts
|
||||
import { evaluateBooleanFlag } from '@grafana/runtime/internal';
|
||||
|
||||
// BAD - Don't do this. The feature toggle will not evaluate correctly
|
||||
const isEnabled = evaluateBooleanFlag('newPreferences', false);
|
||||
|
||||
function makeAPICall() {
|
||||
// GOOD - The feature toggle should be called after app initialisation
|
||||
if (evaluateBooleanFlag('newPreferences', false)) {
|
||||
// do new things
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Enabling toggles in development
|
||||
|
||||
|
|
|
@ -174,7 +174,7 @@ resource "grafana_role" "my_new_role" {
|
|||
description = "My test role"
|
||||
version = 1
|
||||
uid = "newroleuid"
|
||||
global = true
|
||||
global = false
|
||||
|
||||
permissions {
|
||||
action = "org.users:add"
|
||||
|
|
|
@ -0,0 +1,347 @@
|
|||
---
|
||||
aliases:
|
||||
- ../data-sources/prometheus/
|
||||
- ../features/datasources/prometheus/
|
||||
description: Guide for authenticating with Amazon Managed Service for Prometheus in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- prometheus
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Authenticating with SigV4
|
||||
title: Configure the Prometheus data source
|
||||
weight: 200
|
||||
refs:
|
||||
intro-to-prometheus:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
|
||||
exemplars:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
|
||||
configure-data-links-value-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
|
||||
alerting-alert-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/
|
||||
add-a-data-source:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
|
||||
prom-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
|
||||
default-manage-alerts-ui-toggle:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
provision-grafana:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
|
||||
manage-alerts-toggle:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
manage-recording-rules-toggle:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
azure-active-directory:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
|
||||
configure-grafana-configuration-file-location:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
---
|
||||
|
||||
# Connect to Amazon Managed Service for Prometheus
|
||||
|
||||
1. In the data source configuration page, locate the **Auth** section
|
||||
2. Enable **SigV4 auth**
|
||||
3. Configure the following settings:
|
||||
|
||||
| Setting | Description | Example |
|
||||
| --------------------------- | ---------------------------------------------- | --------------------------------------------------------------- |
|
||||
| **Authentication Provider** | Choose your auth method | `AWS SDK Default`, `Access & secret key`, or `Credentials file` |
|
||||
| **Default Region** | AWS region for your workspace | `us-west-2` |
|
||||
| **Access Key ID** | Your AWS access key (if using access key auth) | `AKIA...` |
|
||||
| **Secret Access Key** | Your AWS secret key (if using access key auth) | `wJalrXUtn...` |
|
||||
| **Assume Role ARN** | IAM role ARN (optional) | `arn:aws:iam::123456789:role/GrafanaRole` |
|
||||
|
||||
4. Set the **HTTP URL** to your Amazon Managed Service for Prometheus workspace endpoint: `https://aps-workspaces.us-west-2.amazonaws.com/workspaces/ws-12345678-1234-1234-1234-123456789012/`
|
||||
|
||||
5. Click **Save & test** to verify the connection
|
||||
|
||||
## Example configuration
|
||||
|
||||
```yaml
|
||||
# Example provisioning configuration
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: 'Amazon Managed Prometheus'
|
||||
type: 'grafana-amazonprometheus-datasource'
|
||||
url: 'https://aps-workspaces.us-west-2.amazonaws.com/workspaces/ws-12345678-1234-1234-1234-123456789012/'
|
||||
jsonData:
|
||||
httpMethod: 'POST'
|
||||
sigV4Auth: true
|
||||
sigV4AuthType: 'keys'
|
||||
sigV4Region: 'us-west-2'
|
||||
secureJsonData:
|
||||
sigV4AccessKey: '<access key>'
|
||||
sigV4SecretKey: '<secret key>'
|
||||
```
|
||||
|
||||
## Migrate to Amazon Managed Service for Prometheus
|
||||
|
||||
Learn more about why this is happening: [Prometheus data source update: Redefining our big tent philosophy](https://grafana.com/blog/2025/06/16/prometheus-data-source-update-redefining-our-big-tent-philosophy/)
|
||||
|
||||
Before you begin, ensure you have the organization administrator role. If you are self-hosting Grafana, back up your existing dashboard configurations and queries.
|
||||
|
||||
Grafana Cloud users will be automatically migrated to the relevant version of Prometheus, so no action needs to be taken.
|
||||
|
||||
For air-gapped environments, download and install [Amazon Managed Service for Prometheus](https://grafana.com/grafana/plugins/grafana-amazonprometheus-datasource/), then follow the standard migration process.
|
||||
|
||||
### Migrate
|
||||
|
||||
1. Enable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
|
||||
2. Restart Grafana for the changes to take effect.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
This feature toggle will be removed in Grafana 13, and the migration will be automatic.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Check migration status
|
||||
|
||||
To determine if your Prometheus data sources have been migrated:
|
||||
|
||||
1. Navigate to **Connections** > **Data sources**
|
||||
2. Select your Prometheus data source
|
||||
3. Look for a migration banner at the top of the configuration page
|
||||
|
||||
The banner displays one of the following messages:
|
||||
|
||||
- **"Migration Notice"** - The data source has already been migrated
|
||||
- **"Deprecation Notice"** - The data source has not been migrated
|
||||
- **No banner** - No migration is needed for this data source
|
||||
|
||||
## Common migration issues
|
||||
|
||||
The following sections contain troubleshooting guidance.
|
||||
|
||||
**Migration banner not appearing**
|
||||
|
||||
- Verify the `prometheusTypeMigration` feature toggle is enabled
|
||||
- Restart Grafana after enabling the feature toggle
|
||||
|
||||
**Amazon Managed Service for Prometheus is not installed**
|
||||
|
||||
- Verify that Amazon Managed Service for Prometheus is installed by going to **Connections** > **Add new connection** and search for "Amazon Managed Service for Prometheus"
|
||||
- Install Amazon Managed Service for Prometheus if not already installed
|
||||
|
||||
**After migrating, my data source returns "401 Unauthorized"**
|
||||
|
||||
- If you are using self-hosted Grafana, check your .ini for `grafana-amazonprometheus-datasource` is included in `forward_settings_to_plugins` under the `[aws]` heading.
|
||||
- If you are using Grafana Cloud, contact Grafana support.
|
||||
|
||||
### Rollback self-hosted Grafana without a backup
|
||||
|
||||
If you don’t have a backup of your Grafana instance before the migration, remove the `prometheusTypeMigration` feature toggle, and run the following script. It reverts all Amazon Managed Service for Prometheus data sources back to core Prometheus.
|
||||
|
||||
To revert the migration:
|
||||
|
||||
1. Disable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
|
||||
2. Obtain a bearer token that has `read` and `write` permissions for your Grafana data source API. For more information on the data source API, refer to [Data source API](/docs/grafana/<GRAFANA_VERSION>/developers/http_api/data_source/).
|
||||
3. Run the script below. Make sure to provide your Grafana URL and bearer token.
|
||||
4. (Optional) Report the issue you were experiencing on the [Grafana repository](https://github.com/grafana/grafana/issues). Tag the issue with "datasource/migrate-prometheus-type"
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
# Configuration
|
||||
GRAFANA_URL=""
|
||||
BEARER_TOKEN=""
|
||||
LOG_FILE="grafana_migration_$(date +%Y%m%d_%H%M%S).log"
|
||||
|
||||
# Function to log messages to both console and file
|
||||
log_message() {
|
||||
local message="$1"
|
||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||
echo "[$timestamp] $message" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Function to update a data source
|
||||
update_data_source() {
|
||||
local uid="$1"
|
||||
local data="$2"
|
||||
|
||||
response=$(curl -s -w "\n%{http_code}" -X PUT \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer $BEARER_TOKEN" \
|
||||
-d "$data" \
|
||||
"$GRAFANA_URL/api/datasources/uid/$uid")
|
||||
|
||||
http_code=$(echo "$response" | tail -n1)
|
||||
response_body=$(echo "$response" | sed '$d')
|
||||
|
||||
if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
|
||||
log_message "$uid successful"
|
||||
else
|
||||
log_message "$uid error: HTTP $http_code - $response_body"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to process and update data source types
|
||||
update_data_source_type() {
|
||||
local result="$1"
|
||||
local processed_count=0
|
||||
local updated_count=0
|
||||
local readonly_count=0
|
||||
local skipped_count=0
|
||||
|
||||
# Use jq to parse and process JSON
|
||||
echo "$result" | jq -c '.[]' | while read -r data; do
|
||||
uid=$(echo "$data" | jq -r '.uid')
|
||||
prometheus_type_migration=$(echo "$data" | jq -r '.jsonData["prometheus-type-migration"] // false')
|
||||
data_type=$(echo "$data" | jq -r '.type')
|
||||
read_only=$(echo "$data" | jq -r '.readOnly // false')
|
||||
|
||||
processed_count=$((processed_count + 1))
|
||||
|
||||
# Check conditions
|
||||
if [[ "$prometheus_type_migration" != "true" ]] || [[ "$data_type" != "grafana-amazonprometheus-datasource" ]]; then
|
||||
skipped_count=$((skipped_count + 1))
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$read_only" == "true" ]]; then
|
||||
readonly_count=$((readonly_count + 1))
|
||||
log_message "$uid is readOnly. If this data source is provisioned, edit the data source type to be \`prometheus\` in the provisioning file."
|
||||
continue
|
||||
fi
|
||||
|
||||
# Update the data
|
||||
updated_data=$(echo "$data" | jq '.type = "prometheus" | .jsonData["prometheus-type-migration"] = false')
|
||||
update_data_source "$uid" "$updated_data"
|
||||
updated_count=$((updated_count + 1))
|
||||
|
||||
# Log the raw data for debugging (optional - uncomment if needed)
|
||||
# log_message "DEBUG - Updated data for $uid: $updated_data"
|
||||
done
|
||||
|
||||
# Note: These counts won't work in the while loop due to subshell
|
||||
# Moving summary to the main function instead
|
||||
}
|
||||
|
||||
# Function to get summary statistics
|
||||
get_summary_stats() {
|
||||
local result="$1"
|
||||
local total_datasources=$(echo "$result" | jq '. | length')
|
||||
local migration_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-amazonprometheus-datasource")] | length')
|
||||
local readonly_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-amazonprometheus-datasource" and .readOnly == true)] | length')
|
||||
local updateable_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-amazonprometheus-datasource" and (.readOnly == false or .readOnly == null))] | length')
|
||||
|
||||
log_message "=== MIGRATION SUMMARY ==="
|
||||
log_message "Total data sources found: $total_datasources"
|
||||
log_message "Migration candidates found: $migration_candidates"
|
||||
log_message "Read-only candidates (will be skipped): $readonly_candidates"
|
||||
log_message "Updateable candidates: $updateable_candidates"
|
||||
log_message "=========================="
|
||||
}
|
||||
|
||||
# Main function to remove Prometheus type migration
|
||||
remove_prometheus_type_migration() {
|
||||
log_message "Starting remove Amazon Managed Service for Prometheus migration"
|
||||
log_message "Log file: $LOG_FILE"
|
||||
log_message "Grafana URL: $GRAFANA_URL"
|
||||
|
||||
response=$(curl -s -w "\n%{http_code}" -X GET \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: Bearer $BEARER_TOKEN" \
|
||||
"$GRAFANA_URL/api/datasources/")
|
||||
|
||||
http_code=$(echo "$response" | tail -n1)
|
||||
response_body=$(echo "$response" | sed '$d')
|
||||
|
||||
if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
|
||||
log_message "Successfully fetched data sources"
|
||||
get_summary_stats "$response_body"
|
||||
update_data_source_type "$response_body"
|
||||
log_message "Migration process completed"
|
||||
else
|
||||
log_message "error fetching data sources: HTTP $http_code - $response_body"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to initialize log file
|
||||
initialize_log() {
|
||||
echo "=== Grafana Amazon Managed Service for Prometheus Migration Log ===" > "$LOG_FILE"
|
||||
echo "Started at: $(date)" >> "$LOG_FILE"
|
||||
echo "=============================================" >> "$LOG_FILE"
|
||||
echo "" >> "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Check if jq is installed
|
||||
if ! command -v jq &> /dev/null; then
|
||||
echo "Error: jq is required but not installed. Please install jq to run this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if required variables are set
|
||||
if [[ -z "$GRAFANA_URL" || -z "$BEARER_TOKEN" ]]; then
|
||||
echo "Error: Please set GRAFANA_URL and BEARER_TOKEN variables at the top of the script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Initialize log file
|
||||
initialize_log
|
||||
|
||||
# Execute main function
|
||||
log_message "Script started"
|
||||
remove_prometheus_type_migration
|
||||
log_message "Script completed"
|
||||
|
||||
# Final log message
|
||||
echo ""
|
||||
echo "Migration completed. Full log available at: $LOG_FILE"
|
||||
```
|
||||
|
||||
If you continue to experience issues, check the Grafana server logs for detailed error messages and contact [Grafana Support](https://grafana.com/help/) with your troubleshooting results.
|
|
@ -0,0 +1,359 @@
|
|||
---
|
||||
aliases:
|
||||
- ../data-sources/prometheus/
|
||||
- ../features/datasources/prometheus/
|
||||
description: Guide for authenticating with Azure Monitor Managed Service for Prometheus in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- prometheus
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Authenticating with Azure
|
||||
title: Configure the Prometheus data source
|
||||
weight: 200
|
||||
refs:
|
||||
intro-to-prometheus:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
|
||||
exemplars:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
|
||||
configure-data-links-value-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
|
||||
alerting-alert-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/
|
||||
add-a-data-source:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
|
||||
prom-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
|
||||
default-manage-alerts-ui-toggle:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
provision-grafana:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
|
||||
manage-alerts-toggle:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
|
||||
manage-recording-rules-toggle:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
azure-active-directory:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
|
||||
configure-grafana-configuration-file-location:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
---
|
||||
|
||||
# Connect to Azure Monitor Managed Service for Prometheus
|
||||
|
||||
After creating an Azure Monitor Managed Service for Prometheus data source:
|
||||
|
||||
1. In the data source configuration page, locate the **Authentication** section
|
||||
2. Select your authentication method:
|
||||
- **Managed Identity**: For Azure-hosted Grafana instances. To learn more about Entra login for Grafana, refer to [Configure Azure AD/Entra ID OAuth authentication](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#configure-azure-adentra-id-oauth-authentication)
|
||||
- **App Registration**: For service principal authentication
|
||||
- **Current User**: Uses the current user's Azure AD credentials
|
||||
|
||||
3. Configure based on your chosen method:
|
||||
|
||||
| Setting | Description | Example |
|
||||
| --------------------------- | ------------------------------- | -------------------------------------- |
|
||||
| **Directory (tenant) ID** | Your Azure AD tenant ID | `12345678-1234-1234-1234-123456789012` |
|
||||
| **Application (client) ID** | Your app registration client ID | `87654321-4321-4321-4321-210987654321` |
|
||||
| **Client secret** | Your app registration secret | `your-client-secret` |
|
||||
|
||||
When using Managed Identity for authentication:
|
||||
|
||||
- No additional configuration required if using system-assigned identity.
|
||||
- For user-assigned identity, provide the **Client ID**.
|
||||
|
||||
4. Set the **Prometheus server URL** to your Azure Monitor workspace endpoint:
|
||||
|
||||
```
|
||||
https://your-workspace.eastus2.prometheus.monitor.azure.com
|
||||
```
|
||||
|
||||
5. Click **Save & test** to verify the connection
|
||||
|
||||
## Example configuration
|
||||
|
||||
```yaml
|
||||
# Example provisioning configuration for App Registration
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: 'Azure Monitor Prometheus'
|
||||
type: 'grafana-azureprometheus-datasource'
|
||||
url: 'https://your-workspace.eastus2.prometheus.monitor.azure.com'
|
||||
jsonData:
|
||||
azureCredentials:
|
||||
authType: 'clientsecret'
|
||||
azureCloud: 'AzureCloud'
|
||||
clientId: '<client_id>'
|
||||
httpMethod: 'POST'
|
||||
tenantId: '<tenant_id>'
|
||||
secureJsonData:
|
||||
clientSecret: 'your-client-secret'
|
||||
```
|
||||
|
||||
## Migrate to Azure Monitor Managed Service for Prometheus
|
||||
|
||||
Learn more about why this is happening: [Prometheus data source update: Redefining our big tent philosophy](https://grafana.com/blog/2025/06/16/prometheus-data-source-update-redefining-our-big-tent-philosophy/)
|
||||
|
||||
Before you begin, ensure you have the organization administrator role. If you are self-hosting Grafana, back up your existing dashboard configurations and queries.
|
||||
|
||||
Grafana Cloud users will be automatically migrated to the relevant version of Prometheus, so no action needs to be taken.
|
||||
|
||||
For air-gapped environments, download and install [Azure Monitor Managed Service for Prometheus](https://grafana.com/grafana/plugins/grafana-azureprometheus-datasource/), then follow the standard migration process.
|
||||
|
||||
### Migrate
|
||||
|
||||
1. Enable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
|
||||
2. Restart Grafana for the changes to take effect.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
This feature toggle will be removed in Grafana 13, and the migration will be automatic.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Check migration status

To determine if your Prometheus data sources have been migrated:
|
||||
|
||||
1. Navigate to **Connections** > **Data sources**
|
||||
2. Select your Prometheus data source
|
||||
3. Look for a migration banner at the top of the configuration page
|
||||
|
||||
The banner displays one of the following messages:
|
||||
|
||||
- **"Migration Notice"** - The data source has already been migrated
|
||||
- **"Deprecation Notice"** - The data source has not been migrated
|
||||
- **No banner** - No migration is needed for this data source
|
||||
|
||||
## Common migration issues
|
||||
|
||||
The following sections contain troubleshooting guidance.
|
||||
|
||||
**Migration banner not appearing**
|
||||
|
||||
- Verify the `prometheusTypeMigration` feature toggle is enabled.
|
||||
- Restart Grafana after enabling the feature toggle
|
||||
|
||||
**Azure Monitor Managed Service for Prometheus is not installed**
|
||||
|
||||
- Verify that Azure Monitor Managed Service for Prometheus is installed by going to **Connections** > **Add new connection** and search for "Azure Monitor Managed Service for Prometheus"
|
||||
- Install Azure Monitor Managed Service for Prometheus if not already installed
|
||||
|
||||
**After migrating, my data source returns "401 Unauthorized"**
|
||||
|
||||
- If you are using self-hosted Grafana, check your .ini for `grafana-azureprometheus-datasource` is included in `forward_settings_to_plugins` under the `[azure]` heading.
|
||||
- If you are using Grafana Cloud, contact Grafana support.
|
||||
|
||||
### Rollback self-hosted Grafana without a backup
|
||||
|
||||
If you don’t have a backup of your Grafana instance before the migration, remove the `prometheusTypeMigration` feature toggle, and run the following script. It reverts all Azure Monitor Managed Service for Prometheus data source instances back to core Prometheus.
|
||||
|
||||
To revert the migration:
|
||||
|
||||
1. Disable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
|
||||
2. Obtain a bearer token that has `read` and `write` permissions for your Grafana data source API. For more information on the data source API, refer to [Data source API](/docs/grafana/<GRAFANA_VERSION>/developers/http_api/data_source/).
|
||||
3. Run the script below. Make sure to provide your Grafana URL and bearer token.
|
||||
4. (Optional) Report the issue you were experiencing on the [Grafana repository](https://github.com/grafana/grafana/issues). Tag the issue with "datasource/migrate-prometheus-type"
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
# Configuration
|
||||
GRAFANA_URL=""
|
||||
BEARER_TOKEN=""
|
||||
LOG_FILE="grafana_migration_$(date +%Y%m%d_%H%M%S).log"
|
||||
|
||||
# Function to log messages to both console and file
|
||||
log_message() {
|
||||
local message="$1"
|
||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||
echo "[$timestamp] $message" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
# PUT an updated data source definition to the Grafana API.
# $1 - data source UID; $2 - full JSON payload for the update.
# Logs success, or the HTTP status and body returned by Grafana on failure.
update_data_source() {
    local uid="$1"
    local data="$2"
    local response http_code response_body

    # -w appends the status code on its own line so it can be split off below.
    response=$(curl -s -w "\n%{http_code}" -X PUT \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $BEARER_TOKEN" \
        -d "$data" \
        "$GRAFANA_URL/api/datasources/uid/$uid")

    # Last line is the status code; everything before it is the response body.
    http_code=$(echo "$response" | tail -n1)
    response_body=$(echo "$response" | sed '$d')

    if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
        log_message "$uid successful"
    else
        log_message "$uid error: HTTP $http_code - $response_body"
    fi
}
|
||||
|
||||
# Walk the data source list and revert migrated Azure Prometheus data sources.
# $1 - JSON array of data sources as returned by /api/datasources.
# Only items with jsonData["prometheus-type-migration"] == true and type
# grafana-azureprometheus-datasource are touched; readOnly (provisioned)
# items are reported but skipped.
update_data_source_type() {
    local result="$1"
    local processed_count=0
    local updated_count=0
    local readonly_count=0
    local skipped_count=0
    local data uid prometheus_type_migration data_type read_only updated_data

    # Read from process substitution rather than a pipeline: piping into
    # `while` would run the loop body in a subshell and silently discard
    # every counter increment made inside it.
    while read -r data; do
        uid=$(echo "$data" | jq -r '.uid')
        prometheus_type_migration=$(echo "$data" | jq -r '.jsonData["prometheus-type-migration"] // false')
        data_type=$(echo "$data" | jq -r '.type')
        read_only=$(echo "$data" | jq -r '.readOnly // false')

        processed_count=$((processed_count + 1))

        # Skip anything that was not migrated to the Azure Prometheus type.
        if [[ "$prometheus_type_migration" != "true" ]] || [[ "$data_type" != "grafana-azureprometheus-datasource" ]]; then
            skipped_count=$((skipped_count + 1))
            continue
        fi

        # Provisioned (readOnly) data sources can't be changed through the API.
        if [[ "$read_only" == "true" ]]; then
            readonly_count=$((readonly_count + 1))
            log_message "$uid is readOnly. If this data source is provisioned, edit the data source type to be \`prometheus\` in the provisioning file."
            continue
        fi

        # Revert the type, clear the migration flag, and push the update.
        updated_data=$(echo "$data" | jq '.type = "prometheus" | .jsonData["prometheus-type-migration"] = false')
        update_data_source "$uid" "$updated_data"
        updated_count=$((updated_count + 1))
    done < <(echo "$result" | jq -c '.[]')

    # The counters survive the loop now, so the per-run totals can be logged here.
    log_message "Processed: $processed_count, updated: $updated_count, readOnly skipped: $readonly_count, not applicable: $skipped_count"
}
|
||||
|
||||
# Log counts describing what the run will do, computed from the raw JSON.
# $1 - JSON array of data sources as returned by /api/datasources.
get_summary_stats() {
    local result="$1"
    # Declare and assign separately so jq failures aren't masked by `local`.
    local total_datasources migration_candidates readonly_candidates updateable_candidates

    total_datasources=$(echo "$result" | jq '. | length')
    migration_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-azureprometheus-datasource")] | length')
    readonly_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-azureprometheus-datasource" and .readOnly == true)] | length')
    updateable_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-azureprometheus-datasource" and (.readOnly == false or .readOnly == null))] | length')

    log_message "=== MIGRATION SUMMARY ==="
    log_message "Total data sources found: $total_datasources"
    log_message "Migration candidates found: $migration_candidates"
    log_message "Read-only candidates (will be skipped): $readonly_candidates"
    log_message "Updateable candidates: $updateable_candidates"
    log_message "=========================="
}
|
||||
|
||||
# Fetch all data sources from Grafana, log a summary, and revert every
# eligible Azure Prometheus data source back to core Prometheus.
remove_prometheus_type_migration() {
    local response http_code response_body

    log_message "Starting remove Azure Prometheus migration"
    log_message "Log file: $LOG_FILE"
    log_message "Grafana URL: $GRAFANA_URL"

    # -w appends the status code on its own line so it can be split off below.
    response=$(curl -s -w "\n%{http_code}" -X GET \
        -H "Content-Type: application/json" \
        -H "Authorization: Bearer $BEARER_TOKEN" \
        "$GRAFANA_URL/api/datasources/")

    # Last line is the status code; everything before it is the response body.
    http_code=$(echo "$response" | tail -n1)
    response_body=$(echo "$response" | sed '$d')

    if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
        log_message "Successfully fetched data sources"
        get_summary_stats "$response_body"
        update_data_source_type "$response_body"
        log_message "Migration process completed"
    else
        log_message "error fetching data sources: HTTP $http_code - $response_body"
    fi
}
|
||||
|
||||
# Start a fresh log file for this run, overwriting any previous file of the
# same name, and write a small header block.
initialize_log() {
    # Group the writes so the file is opened once and truncated once.
    {
        echo "=== Grafana Azure Prometheus Migration Log ==="
        echo "Started at: $(date)"
        echo "============================================="
        echo ""
    } > "$LOG_FILE"
}
|
||||
|
||||
# --- Preflight checks ---

# jq is required for all of the JSON parsing above.
if ! command -v jq &> /dev/null; then
    echo "Error: jq is required but not installed. Please install jq to run this script."
    exit 1
fi

# Refuse to run without the connection settings filled in.
if [[ -z "$GRAFANA_URL" || -z "$BEARER_TOKEN" ]]; then
    echo "Error: Please set GRAFANA_URL and BEARER_TOKEN variables at the top of the script."
    exit 1
fi

# --- Run the migration rollback ---

# Initialize log file
initialize_log

log_message "Script started"
remove_prometheus_type_migration
log_message "Script completed"

# Final log message
echo ""
echo "Migration completed. Full log available at: $LOG_FILE"
|
||||
```
|
||||
|
||||
If you continue to experience issues, check the Grafana server logs for detailed error messages and contact [Grafana Support](https://grafana.com/help/) with your troubleshooting results.
|
|
@ -34,7 +34,7 @@ After you localize the latency problem to a few exemplar traces, you can combine
|
|||
|
||||
Support for exemplars is available for the Prometheus data source only.
|
||||
After you enable the functionality, exemplar data is available by default.
|
||||
For more information on exemplar configuration and how to enable exemplars, refer to [configuring exemplars in the Prometheus data source](../../datasources/prometheus/configure-prometheus-data-source/#exemplars).
|
||||
For more information on exemplar configuration and how to enable exemplars, refer to the Exemplars section in [Prometheus configuration options](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure/#configuration-options).
|
||||
|
||||
Grafana shows exemplars alongside a metric in the Explore view and in dashboards.
|
||||
Each exemplar displays as a highlighted star.
|
||||
|
|
|
@ -37,7 +37,7 @@ For an integrated, UI-driven Git workflow focused on dashboards, explore Git Syn
|
|||
- Connect folders or entire Grafana instances directly to a GitHub repository to synchronize dashboard definitions, enabling version control, branching, and pull requests directly from Grafana.
|
||||
- Git Sync offers a simple, out-of-the-box approach for managing dashboards as code.
|
||||
{{< admonition type="note" >}}
|
||||
Git Sync is an **experimental feature** in Grafana 12, available in Grafana OSS and Enterprise [nightly releases](https://grafana.com/grafana/download/nightly). It is not yet available in Grafana Cloud.
|
||||
Git Sync is available in **private preview** for Grafana Cloud, and it's an **experimental feature** in Grafana 12, available in Grafana OSS and Enterprise [nightly releases](https://grafana.com/grafana/download/nightly).
|
||||
{{< /admonition >}}
|
||||
|
||||
Refer to the [Git Sync documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/) to learn more.
|
||||
|
|
|
@ -18,16 +18,18 @@ weight: 300
|
|||
# Provision resources and sync dashboards
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
|
||||
|
||||
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud. Support and documentation are available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided. You can sign up for the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
|
||||
Git Sync and local file provisioning are [experimental features](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
Provisioning is an experimental feature that allows you to configure how to store your dashboard JSONs and other files in GitHub repositories using either Git Sync or a local path.
|
||||
Provisioning allows you to configure how to store your dashboard JSON and other files in GitHub repositories using either Git Sync or a local path.
|
||||
|
||||
Of the two options, **Git Sync** is the preferred method for provisioning your dashboards. You can synchronize any new dashboards and changes to existing dashboards from the UI to your configured GitHub repository. If you push a change in the repository, those changes are mirrored in your Grafana instance. See [Git Sync workflow](#git-sync-workflow).
|
||||
Of the two options, **Git Sync** is the preferred method for provisioning your dashboards. You can synchronize any new dashboards and changes to existing dashboards from the UI to your configured GitHub repository. If you push a change in the repository, those changes are mirrored in your Grafana instance. Refer to [Git Sync workflow](#git-sync-workflow) for more information.
|
||||
|
||||
Alternatively, **local file provisioning** allows you to include in your Grafana instance resources (such as folders and dashboard JSON files) that are stored in a local file system. See [Local file workflow](local-file-workflow).
|
||||
Alternatively, **local file provisioning** allows you to include in your Grafana instance resources (such as folders and dashboard JSON files) that are stored in a local file system. Refer to [Local file workflow](#local-file-workflow) for more information.
|
||||
|
||||
## Provisioned folders and connections
|
||||
|
||||
|
@ -40,8 +42,7 @@ You can set a single folder, or multiple folders to a different repository, with
|
|||
In the Git Sync workflow:
|
||||
|
||||
- When you provision resources with Git Sync you can modify them from within the Grafana UI or within the GitHub repository. Changes made in either the repository or the Grafana UI are bidirectional.
|
||||
- Any changes made in the provisioned files stored in the GitHub repository are reflected in the Grafana database. By default, Grafana polls GitHub every 60 seconds.
|
||||
- The Grafana UI reads from the database and updates the UI to reflect these changes.
|
||||
- Any changes made in the provisioned files stored in the GitHub repository are reflected in the Grafana database. By default, Grafana polls GitHub every 60 seconds. The Grafana UI reads from the database and updates the UI to reflect these changes.
|
||||
|
||||
For example, if you update a dashboard within the Grafana UI and click **Save** to preserve the changes, you'll be notified that the dashboard is provisioned in a GitHub repository. Next you'll be prompted to choose how to preserve the changes: either directly to a branch, or pushed to a new branch using a pull request in GitHub.
|
||||
|
||||
|
@ -52,8 +53,7 @@ For more information, see [Introduction to Git Sync](https://grafana.com/docs/gr
|
|||
In the local file workflow:
|
||||
|
||||
- All provisioned resources are changed in the local files.
|
||||
- Any changes made in the provisioned files are reflected in the Grafana database.
|
||||
- The Grafana UI reads the database and updates the UI to reflect these changes.
|
||||
- Any changes made in the provisioned files are reflected in the Grafana database. The Grafana UI reads the database and updates the UI to reflect these changes.
|
||||
- You can't use the Grafana UI to edit or delete provisioned resources.
|
||||
|
||||
Learn more in [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/).
|
||||
|
|
|
@ -16,9 +16,8 @@ weight: 200
|
|||
# Set up file provisioning
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Local file provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
|
||||
|
||||
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
Local file provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions, but it's **not available in Grafana Cloud**. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
|
@ -48,10 +47,14 @@ Refer to [Provision Grafana](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/
|
|||
### Limitations
|
||||
|
||||
- A provisioned dashboard can't be deleted from within Grafana UI. The dashboard has to be deleted at the local file system and those changes synced to Grafana.
|
||||
- Changes from the local file system are one way: you can't save changes from the UI to GitHub.
|
||||
- Changes from the local file system are one way: you can't save changes from the Grafana UI to GitHub.
|
||||
|
||||
## Before you begin
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature.
|
||||
{{< /admonition >}}
|
||||
|
||||
To set up file provisioning, you need:
|
||||
|
||||
- Administration rights in your Grafana organization.
|
||||
|
@ -122,15 +125,15 @@ The set up process verifies the path and provides an error message if a problem
|
|||
|
||||
### Choose what to synchronize
|
||||
|
||||
In this section, you determine the actions taken with the storage you selected.
|
||||
Choose to either sync your entire organization's resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
|
||||
|
||||
1. Select how resources should be handled in Grafana.
|
||||
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. With this option, all of your dashboards are synced to that one repository. You can only have one provisioned connection with this selection, and you won't have the option of setting up additional repositories to connect to.
|
||||
|
||||
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. You can only have one provisioned connection with this selection.
|
||||
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 folders. - Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
|
||||
<!-- - Select **Migrate instance to repository** to migrate the Grafana instance to the repository. This option is not available during the first time you set up remote provisioning. -->
|
||||
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections.
|
||||
|
||||
1. Select **Synchronize** to continue.
|
||||
Next, enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
|
||||
|
||||
Click **Synchronize** to continue.
|
||||
|
||||
### Synchronize with external storage
|
||||
|
||||
|
|
|
@ -16,50 +16,60 @@ weight: 100
|
|||
# Set up Git Sync
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
|
||||
|
||||
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud, and is an [experimental feature](https://grafana.com/docs/release-life-cycle/) in Grafana v12 for open source and Enterprise editions.
|
||||
|
||||
Support and documentation are available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided.
|
||||
|
||||
You can sign up for the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
Git Sync lets you manage Grafana dashboards as code by storing dashboards JSON files and folders in a remote GitHub repository.
|
||||
Alternatively, you can configure a local file system instead of using GitHub.
|
||||
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for information.
|
||||
Git Sync lets you manage Grafana dashboards as code by storing dashboard JSON files and folders in a remote GitHub repository.
|
||||
|
||||
This page explains how to use Git Sync with a GitHub repository.
|
||||
To set up Git Sync and synchronize with a GitHub repository follow these steps:
|
||||
|
||||
To set up Git Sync, you need to:
|
||||
1. [Enable feature toggles in Grafana](#enable-required-feature-toggles) (first time set up).
|
||||
1. [Create a GitHub access token](#create-a-github-access-token).
|
||||
1. [Configure a connection to your GitHub repository](#set-up-the-connection-to-github).
|
||||
1. [Choose what content to sync with Grafana](#choose-what-to-synchronize).
|
||||
|
||||
1. Enable feature toggles in Grafana (first time set up).
|
||||
1. Configure a connection to your GitHub repository.
|
||||
1. Choose what content to sync with Grafana.
|
||||
1. Optional: Extend Git Sync by enabling pull request notifications and image previews of dashboard changes.
|
||||
Optionally, you can [extend Git Sync](#configure-webhooks-and-image-rendering) by enabling pull request notifications and image previews of dashboard changes.
|
||||
|
||||
| Capability | Benefit | Requires |
|
||||
| ----------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------- |
|
||||
| Adds a table summarizing changes to your pull request | Provides a convenient way to save changes back to GitHub. | Webhooks configured |
|
||||
| Add a dashboard preview image to a PR | View a snapshot of dashboard changes to a pull request without opening Grafana. | Image renderer plugin and webhooks configured |
|
||||
| Capability | Benefit | Requires |
|
||||
| ----------------------------------------------------- | ------------------------------------------------------------------------------- | -------------------------------------- |
|
||||
| Adds a table summarizing changes to your pull request | Provides a convenient way to save changes back to GitHub. | Webhooks configured |
|
||||
| Add a dashboard preview image to a PR | View a snapshot of dashboard changes to a pull request without opening Grafana. | Image renderer and webhooks configured |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
|
||||
Alternatively, you can configure a local file system instead of using GitHub. Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for more information.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
## Performance impacts of enabling Git Sync
|
||||
|
||||
Git Sync is an experimental feature and is under continuous development.
|
||||
Git Sync is an experimental feature and is under continuous development. Reporting any issues you encounter can help us improve Git Sync.
|
||||
|
||||
We recommend evaluating the performance impact, if any, in a non-production environment.
|
||||
|
||||
When Git Sync is enabled, the database load might increase, especially for instances with a lot of folders and nested folders.
|
||||
Reporting any issues you encounter can help us improve Git Sync.
|
||||
When Git Sync is enabled, the database load might increase, especially for instances with a lot of folders and nested folders. Evaluate the performance impact, if any, in a non-production environment.
|
||||
|
||||
## Before you begin
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
|
||||
Refer to [Known limitations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync#known-limitations/) before using Git Sync.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
To set up Git Sync, you need:
|
||||
|
||||
- Administration rights in your Grafana organization.
|
||||
- Enable the required feature toggles in your Grafana instance. Refer to [Enable required feature toggles](#enable-required-feature-toggles) for instructions.
|
||||
- A GitHub repository to store your dashboards in.
|
||||
- If you want to use a local file path, refer to [the local file path guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/).
|
||||
- A GitHub access token. The Grafana UI will also explain this to you as you set it up.
|
||||
- A GitHub access token. The Grafana UI will prompt you during setup.
|
||||
- Optional: A public Grafana instance.
|
||||
- Optional: Image Renderer plugin to save image previews with your PRs.
|
||||
- Optional: The [Image Renderer service](https://github.com/grafana/grafana-image-renderer) to save image previews with your PRs.
|
||||
|
||||
## Enable required feature toggles
|
||||
|
||||
|
@ -118,28 +128,25 @@ To connect your GitHub repository, follow these steps:
|
|||
|
||||
### Choose what to synchronize
|
||||
|
||||
You can choose to either use one repository for an entire organization or sync to a new Grafana folder (up to 10 connections).
|
||||
If you choose to sync all resources with external storage, then all of your dashboards are synced to that one repository.
|
||||
You won't have the option of setting up additional repositories to connect to.
|
||||
{{< admonition type="caution" >}}
|
||||
|
||||
You can choose to synchronize all resources with GitHub or you can sync resources to a new Grafana folder.
|
||||
The options you have depend on the status of your GitHub repository.
|
||||
For example, if you are syncing with a new or empty repository, you won't have an option to migrate dashboards.
|
||||
If you're using Git Sync in Grafana Cloud you can only sync specific folders for the moment. Git Sync will be available for your full instance soon.
|
||||
|
||||
1. Select how resources should be handled in Grafana.
|
||||
{{< /admonition >}}
|
||||
|
||||
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. You can only have one provisioned connection with this selection.
|
||||
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections. - Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
|
||||
<!-- - Select **Migrate instance to repository** to migrate the Grafana instance to the repository. This option is not available during the first time you set up remote provisioning. -->
|
||||
In this step you can decide which elements to synchronize. Keep in mind the available options depend on the status of your GitHub repository. The first time you connect Grafana with a GitHub repository, you need to synchronize with external storage. If you are syncing with a new or empty repository, you won't have an option to migrate dashboards.
|
||||
|
||||
1. Select **Synchronize** to continue.
|
||||
1. Choose to either sync your entire organization's resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
|
||||
|
||||
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. With this option, all of your dashboards are synced to that one repository. You can only have one provisioned connection with this selection, and you won't have the option of setting up additional repositories to connect to.
|
||||
|
||||
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections.
|
||||
|
||||
1. Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
|
||||
1. Click **Synchronize** to continue.
|
||||
|
||||
<!-- This is only relevant if we include the "Migrate instance to repository" option above. -->
|
||||
<!-- ### Synchronize with external storage
|
||||
|
||||
The first time you connect Grafana with a GitHub repository, you need to synchronize with external storage.
|
||||
Future updates will be automatically saved to the repository and provisioned back to the instance.
|
||||
|
||||
{{< admonition type="note">}}
|
||||
During the synchronization process, your dashboards will be temporarily unavailable.
|
||||
No data or configuration will be lost.
|
||||
|
@ -157,15 +164,14 @@ Finally, you can set up how often your configured storage is polled for updates.
|
|||
1. For **Update instance interval (seconds)**, enter how often you want the instance to pull updates from GitHub. The default value is 60 seconds.
|
||||
1. Optional: Select **Read only** to ensure resources can't be modified in Grafana.
|
||||
<!-- No workflow option listed in the UI. 1. For **Workflows**, select the GitHub workflows that you want to allow to run in the repository. Both **Branch** and **Write** are selected by default. -->
|
||||
1. Optional: If you have the Grafana Image Renderer plugin configured, you can **Enable dashboards previews in pull requests**. If image rendering is not available, then you can't select this option. For more information, refer to [Grafana Image Renderer](https://grafana.com/grafana/plugins/grafana-image-renderer/).
|
||||
1. Optional: If you have the Grafana Image Renderer plugin configured, you can **Enable dashboards previews in pull requests**. If image rendering is not available, then you can't select this option. For more information, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
|
||||
1. Select **Finish** to proceed.
|
||||
|
||||
## Verify your dashboards in Grafana
|
||||
|
||||
To verify that your dashboards are available at the location that you specified, click **Dashboards**. The name of the dashboard is listed in the **Name** column.
|
||||
|
||||
Now that your dashboards have been synced from a repository, you can customize the name, change the branch, and create a pull request (PR) for it.
|
||||
Refer to [Use Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/) for more information.
|
||||
Now that your dashboards have been synced from a repository, you can customize the name, change the branch, and create a pull request (PR) for it. Refer to [Manage provisioned repositories with Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/) for more information.
|
||||
|
||||
## Configure webhooks and image rendering
|
||||
|
||||
|
@ -214,8 +220,7 @@ The necessary paths required to be exposed are (RegExp):
|
|||
By setting up image rendering, you can add visual previews of dashboard updates directly in pull requests.
|
||||
Image rendering also requires webhooks.
|
||||
|
||||
You can enable this capability by installing the Grafana Image Renderer plugin in your Grafana instance.
|
||||
For more information and installation instructions, refer to [Grafana Image Renderer](https://grafana.com/grafana/plugins/grafana-image-renderer/).
|
||||
You can enable this capability by installing the Grafana Image Renderer in your Grafana instance. For more information and installation instructions, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
|
||||
|
||||
## Modify configurations after set up is complete
|
||||
|
||||
|
|
|
@ -16,67 +16,86 @@ weight: 100
|
|||
# Introduction to Git Sync
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
|
||||
|
||||
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud, and is an [experimental feature](https://grafana.com/docs/release-life-cycle/) in Grafana v12 for open source and Enterprise editions.
|
||||
|
||||
Support and documentation is available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided.
|
||||
|
||||
You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
Git Sync in Grafana lets you manage your dashboards as code, stored as JSON files in GitHub. You and your team can version control, collaborate, and automate deployments efficiently.
|
||||
|
||||
Using Git Sync, you can:
|
||||
|
||||
- Manage dashboard configuration outside of Grafana instances using Git
|
||||
- Introduce a review process for creating and modifying dashboards
|
||||
- Manage dashboard configuration outside of Grafana instances
|
||||
- Replicate dashboards across multiple instances
|
||||
|
||||
Whenever a dashboard is modified, Grafana can commit changes to Git upon saving. Users can configure settings to either enforce PR approvals before merging or allow direct commits.
|
||||
|
||||
Users can push changes directly to GitHub and see them in Grafana. Similarly, automated workflows can do changes that will be automatically represented in Grafana by updating Git.
|
||||
|
||||
Because the dashboards are defined in JSON files, you can enable as-code workflows where the JSON is output from Go, TypeScript, or another coding language in the format of a dashboard schema.
|
||||
|
||||
To learn more about creating dashboards in a coding language to provision them for Git Sync, refer to the [Foundation SDK](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/foundation-sdk) documentation.
|
||||
|
||||
## How it works
|
||||
|
||||
Git Sync is bidirectional and also works with changes done directly in GitHub as well as within the Grafana UI.
|
||||
Grafana periodically polls GitHub at a regular internal to synchronize any changes.
|
||||
With the webhooks feature enabled, repository notifications appear almost immediately.
|
||||
Without webhooks, Grafana polls for changes at the specified interval.
|
||||
The default polling interval is 60 seconds.
|
||||
Git Sync is bidirectional and works both with changes done directly in GitHub as well as in the Grafana UI.
|
||||
|
||||
Any changes made in the provisioned files stored in the GitHub repository are reflected in the Grafana database.
|
||||
The Grafana UI reads the database and updates the UI to reflect these changes.
|
||||
### Make changes in Grafana
|
||||
|
||||
Whenever you modify a dashboard directly from the UI, Grafana can commit changes to Git upon saving. You can configure settings to either enforce PR approvals before merging in your repository, or allow direct commits.
|
||||
|
||||
Grafana periodically polls GitHub at a regular interval to synchronize any changes. The default polling interval is 60 seconds, and you can change this setting in the Grafana UI.
|
||||
|
||||
- If you enable the [webhooks feature](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/#configure-webhooks-and-image-rendering), repository notifications appear almost immediately.
|
||||
- Without webhooks, Grafana polls for changes at the specified interval.
|
||||
|
||||
### Make changes in your GitHub repositories
|
||||
|
||||
With Git Sync, you can make changes in your provisioned files in GitHub and see them in Grafana. Automated workflows ensure those changes are automatically represented in the Grafana database by updating Git. The Grafana UI reads the database and updates the UI to reflect these changes.
|
||||
|
||||
## Known limitations
|
||||
|
||||
Git Sync is under development and the following limitations apply:
|
||||
|
||||
- You can only authenticate in GitHub using your Personal Access Token.
|
||||
- Support for native Git, Git app, and other providers, such as GitLab or Bitbucket, is on the roadmap.
|
||||
- If you're using Git Sync in Grafana Cloud you can only sync specific folders for the moment. Git Sync will be available for your full instance soon.
|
||||
- Restoring resources from the UI is currently not possible. As an alternative, you can restore dashboards directly in your GitHub repository by raising a PR, and they will be updated in Grafana.
|
||||
|
||||
## Common use cases
|
||||
|
||||
Git Sync in Grafana lets you manage dashboards as code.
|
||||
Because your dashboard JSON files are stored in GitHub, you and your team can version control, collaborate, and automate deployments efficiently.
|
||||
You can use Git Sync in the following scenarios.
|
||||
|
||||
### Version control and auditing
|
||||
|
||||
Organizations can maintain a structured, version-controlled history of Grafana dashboards.
|
||||
The version control lets you revert to previous versions when necessary, compare modifications across commits, and ensure transparency in dashboard management.
|
||||
Organizations can maintain a structured, version-controlled history of Grafana dashboards. The version control lets you revert to previous versions when necessary, compare modifications across commits, and ensure transparency in dashboard management.
|
||||
|
||||
Additionally, having a detailed history of changes enhances compliance efforts, as teams can generate audit logs that document who made changes, when they were made, and why.
|
||||
|
||||
### Automated deployment and CI/CD integration
|
||||
|
||||
Teams can streamline their workflow by integrating dashboard updates into their CI/CD pipelines.
|
||||
By pushing changes to GitHub, automated processes can trigger validation checks, test dashboard configurations, and deploy updates programmatically using the `grafanactl` CLI and Foundation SDK.
|
||||
Teams can streamline their workflow by integrating dashboard updates into their CI/CD pipelines. By pushing changes to GitHub, automated processes can trigger validation checks, test dashboard configurations, and deploy updates programmatically using the `grafanactl` CLI and Foundation SDK.
|
||||
|
||||
This reduces the risk of human errors, ensures consistency across environments, and enables a faster, more reliable release cycle for dashboards used in production monitoring and analytics.
|
||||
|
||||
### Collaborative dashboard development
|
||||
|
||||
With Git Sync, multiple users can work on dashboards simultaneously without overwriting each other’s modifications.
|
||||
By leveraging pull requests and branch-based workflows, teams can submit changes for review before merging them into the main branch. This process not only improves quality control but also ensures that dashboards adhere to best practices and organizational standards. Additionally, GitHub’s built-in discussion and review tools facilitate effective collaboration, making it easier to address feedback before changes go live.
|
||||
By leveraging pull requests and branch-based workflows, teams can submit changes for review before merging them into the main branch. This process not only improves quality control but also ensures that dashboards adhere to best practices and organizational standards.
|
||||
|
||||
Additionally, GitHub’s built-in discussion and review tools facilitate effective collaboration, making it easier to address feedback before changes go live.
|
||||
|
||||
### Multi-environment synchronization
|
||||
|
||||
Enterprises managing multiple Grafana instances, such as development, staging, and production environments, can seamlessly sync dashboards across these instances.
|
||||
This ensures consistency in visualization and monitoring configurations, reducing discrepancies that might arise from manually managing dashboards in different environments.
|
||||
Enterprises managing multiple Grafana instances, such as development, staging, and production environments, can seamlessly sync dashboards across these instances. This ensures consistency in visualization and monitoring configurations, reducing discrepancies that might arise from manually managing dashboards in different environments.
|
||||
|
||||
By using Git Sync, teams can automate deployments across environments, eliminating repetitive setup tasks and maintaining a standardized monitoring infrastructure across the organization.
|
||||
|
||||
### Disaster recovery and backup
|
||||
|
||||
By continuously syncing dashboards to GitHub, organizations can create an always-updated backup, ensuring dashboards are never lost due to accidental deletion or system failures.
|
||||
If an issue arises--such as a corrupted dashboard, unintended modification, or a system crash--teams can quickly restore the latest functional version from the Git repository.
|
||||
This not only minimizes downtime but also adds a layer of resilience to Grafana monitoring setups, ensuring critical dashboards remain available when needed.
|
||||
|
||||
If an issue arises, such as a corrupted dashboard, unintended modification, or a system crash, teams can quickly restore the latest functional version from the Git repository. This not only minimizes downtime but also adds a layer of resilience to Grafana monitoring setups, ensuring critical dashboards remain available when needed.
|
||||
|
||||
## Provision dashboards as code
|
||||
|
||||
Because dashboards are defined in JSON files, you can enable as-code workflows where the JSON file is an output from Go, TypeScript, or another coding language in the format of a dashboard schema.
|
||||
|
||||
To learn more about creating dashboards in a coding language to provision them for Git Sync, refer to the [Foundation SDK](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/foundation-sdk) documentation.
|
||||
|
|
|
@ -16,9 +16,10 @@ weight: 300
|
|||
# Work with provisioned dashboards
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Git Sync and File path provisioning an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana. These features aren't available publicly in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
|
||||
|
||||
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud. Support and documentation is available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided. You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
|
||||
Git Sync and local file provisioning are [experimental features](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
|
@ -30,16 +31,17 @@ For more information, refer to the [Dashboards](https://grafana.com/docs/grafana
|
|||
|
||||
Dashboards and folders synchronized using Git Sync or a local file path are referred to as "provisioned" resources.
|
||||
|
||||
Of the two experimental options, Git Sync is the recommended method for provisioning your dashboards.
|
||||
### Git Sync provisioning
|
||||
|
||||
Of the two experimental options, **Git Sync** is the recommended method for provisioning your dashboards.
|
||||
You can synchronize any new dashboards and changes to existing dashboards to your configured GitHub repository.
|
||||
If you push a change in the repository, those changes are mirrored in your Grafana instance.
|
||||
For more information on configuring Git Sync, refer to [Set up Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/).
|
||||
|
||||
For more information on configuring Git Sync, refer to [Introduction to Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/).
|
||||
|
||||
### Local path provisioning
|
||||
|
||||
Using the local path provisioning makes files from a specified path available within Grafana.
|
||||
These provisioned resources can only be modified in the local files and not within Grafana.
|
||||
Any changes made in the configured local path are updated in Grafana.
|
||||
Local path provisioning makes files from a specified path available within Grafana, and any changes made in the configured local path are updated in Grafana. Note that these provisioned resources can only be modified in the local files and not within Grafana.
|
||||
|
||||
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup) to learn more about the version of local file provisioning in Grafana 12.
|
||||
|
||||
|
@ -114,9 +116,9 @@ Saving changes requires opening a pull request in your GitHub repository.
|
|||
|
||||
### Remove dashboards
|
||||
|
||||
You can remove a provisioned dashboard by deleting the dashboard from the repository.
|
||||
You can remove a provisioned dashboard by deleting the dashboard from the repository. The Grafana UI updates when the changes from the GitHub repository sync.
|
||||
|
||||
Grafana updates when the changes from the GitHub repository sync.
|
||||
To restore a deleted dashboard, raise a PR directly in your GitHub repository. Restoring resources from the UI is currently not possible.
|
||||
|
||||
### Tips
|
||||
|
||||
|
@ -128,9 +130,6 @@ Grafana updates when the changes from the GitHub repository sync.
|
|||
## Manage dashboards provisioned with file provisioning
|
||||
|
||||
To update any resources in the local path, you need to edit the files directly and then save them locally.
|
||||
These changes are synchronized to Grafana.
|
||||
However, you can't create, edit, or delete these resources using the Grafana UI.
|
||||
|
||||
For more information, refer to [How it works](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/).
|
||||
These changes are synchronized to Grafana. However, you can't create, edit, or delete these resources using the Grafana UI.
|
||||
|
||||
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for configuration instructions.
|
||||
|
|
|
@ -19,20 +19,22 @@ weight: 400
|
|||
# Manage provisioned repositories with Git Sync
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
|
||||
|
||||
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud, and is an [experimental feature](https://grafana.com/docs/release-life-cycle/) in Grafana v12 for open source and Enterprise editions.
|
||||
|
||||
Support and documentation is available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided.
|
||||
|
||||
You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
After you have set up Git Sync, you can synchronize any changes in your existing dashboards with your configured GitHub repository. Similarly, if you push a change in the repository, those changes are mirrored in your Grafana instance.
|
||||
After you have set up Git Sync, you can synchronize any changes you make in your existing provisioned folders in the UI with your configured GitHub repository. Similarly, if you push a change into your repository, those changes are mirrored in your Grafana instance.
|
||||
|
||||
## View current status of synchronization
|
||||
|
||||
Each repository synchronized with Git Sync has a dashboard that provides a summary of resources, health, pull status, webhook, sync jobs, resources, and files.
|
||||
Use the detailed information accessed in **View** to help troubleshoot and understand the health of your repository's connection with Grafana.
|
||||
When you synchronize a repository, Git Sync also creates a dashboard that provides a summary of resources, health, pull status, webhook, sync jobs, resources, and files.
|
||||
|
||||
To view the current status, follow these steps.
|
||||
Use the **View** section in **Provisioning** to see detailed information about the current status of your sync, understand the health of your repository's connection with Grafana, and [troubleshoot](#troubleshoot-synchronization) possible issues:
|
||||
|
||||
1. Log in to your Grafana server with an account that has the Grafana Admin or Editor flag set.
|
||||
1. Select **Administration** in the left-side menu and then **Provisioning**.
|
||||
|
@ -44,7 +46,7 @@ To view the current status, follow these steps.
|
|||
|
||||
Synchronizing resources from provisioned repositories into your Grafana instance pulls the resources into the selected folder. Existing dashboards with the same `uid` are overwritten.
|
||||
|
||||
To sync changes from your dashboards with your Git repository:
|
||||
To sync changes from your Grafana dashboards with your Git repository:
|
||||
|
||||
1. From the left menu, select **Administration** > **Provisioning**.
|
||||
1. Select **Pull** under the repository you want to sync.
|
||||
|
@ -64,6 +66,12 @@ Refer to [Work with provisioned dashboards](../provisioned-dashboards) for infor
|
|||
|
||||
## Troubleshoot synchronization
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
|
||||
Before you proceed to troubleshoot, understand the [known limitations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/#known-limitations).
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
Monitor the **View** status page for synchronization issues and status updates. Common events include:
|
||||
|
||||
- Sync started
|
||||
|
|
|
@ -71,9 +71,12 @@ test.describe(
|
|||
await test.step('3.Edit and restore default groupBy', async () => {
|
||||
const dashboardPage = await gotoDashboardPage({ uid: DASHBOARD_UNDER_TEST });
|
||||
|
||||
// Wait for the page to load
|
||||
const groupByVariable = getGroupByInput(dashboardPage, selectors);
|
||||
await expect(groupByVariable).toBeVisible();
|
||||
|
||||
const initialSelectedOptionsCount = await groupByValues.count();
|
||||
|
||||
const groupByVariable = getGroupByInput(dashboardPage, selectors);
|
||||
await groupByVariable.click();
|
||||
|
||||
const groupByOption = groupByOptions.nth(1);
|
||||
|
|
|
@ -1856,11 +1856,6 @@
|
|||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/commandPalette/actions/recentScopesActions.ts": {
|
||||
"react-hooks/rules-of-hooks": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/commandPalette/actions/scopeActions.tsx": {
|
||||
"react-hooks/rules-of-hooks": {
|
||||
"count": 4
|
||||
|
@ -3383,11 +3378,6 @@
|
|||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/transformers/editors/GroupByTransformerEditor.tsx": {
|
||||
"@typescript-eslint/consistent-type-assertions": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/transformers/editors/GroupToNestedTableTransformerEditor.tsx": {
|
||||
"no-restricted-syntax": {
|
||||
"count": 1
|
||||
|
@ -4552,11 +4542,6 @@
|
|||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/plugins/panel/logs/types.ts": {
|
||||
"no-barrel-files/no-barrel-files": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/plugins/panel/nodeGraph/Edge.tsx": {
|
||||
"@typescript-eslint/consistent-type-assertions": {
|
||||
"count": 1
|
||||
|
|
2
go.mod
2
go.mod
|
@ -246,6 +246,7 @@ require (
|
|||
github.com/grafana/grafana/apps/plugins v0.0.0 // @grafana/plugins-platform-backend
|
||||
github.com/grafana/grafana/apps/preferences v0.0.0 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/grafana/apps/provisioning v0.0.0 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/grafana/apps/scope v0.0.0 // @grafana/grafana-operator-experience-squad
|
||||
github.com/grafana/grafana/apps/secret v0.0.0 // @grafana/grafana-operator-experience-squad
|
||||
github.com/grafana/grafana/apps/shorturl v0.0.0 // @grafana/sharing-squad
|
||||
github.com/grafana/grafana/pkg/aggregator v0.0.0 // @grafana/grafana-app-platform-squad
|
||||
|
@ -274,6 +275,7 @@ replace (
|
|||
github.com/grafana/grafana/apps/plugins => ./apps/plugins
|
||||
github.com/grafana/grafana/apps/preferences => ./apps/preferences
|
||||
github.com/grafana/grafana/apps/provisioning => ./apps/provisioning
|
||||
github.com/grafana/grafana/apps/scope => ./apps/scope
|
||||
github.com/grafana/grafana/apps/secret => ./apps/secret
|
||||
github.com/grafana/grafana/apps/shorturl => ./apps/shorturl
|
||||
|
||||
|
|
1
go.work
1
go.work
|
@ -18,6 +18,7 @@ use (
|
|||
./apps/plugins
|
||||
./apps/preferences
|
||||
./apps/provisioning
|
||||
./apps/scope
|
||||
./apps/secret
|
||||
./apps/shorturl
|
||||
./pkg/aggregator
|
||||
|
|
|
@ -90,6 +90,7 @@ grafana::codegen:run apps/dashboard/pkg
|
|||
grafana::codegen:run apps/provisioning/pkg
|
||||
grafana::codegen:run apps/folder/pkg
|
||||
grafana::codegen:run apps/preferences/pkg
|
||||
grafana::codegen:run apps/scope/pkg
|
||||
grafana::codegen:run apps/alerting/alertenrichment/pkg
|
||||
|
||||
if [ -d "pkg/extensions/apis" ]; then
|
||||
|
|
|
@ -302,6 +302,7 @@
|
|||
"@locker/near-membrane-shared-dom": "0.14.0",
|
||||
"@msagl/core": "^1.1.19",
|
||||
"@msagl/parser": "^1.1.19",
|
||||
"@openfeature/web-sdk": "^1.6.1",
|
||||
"@opentelemetry/api": "1.9.0",
|
||||
"@opentelemetry/exporter-collector": "0.25.0",
|
||||
"@opentelemetry/semantic-conventions": "1.37.0",
|
||||
|
|
|
@ -1,5 +1,12 @@
|
|||
import { ThemeColors } from './createColors';
|
||||
import { ThemeShadows } from './createShadows';
|
||||
import type { Radii } from './createShape';
|
||||
import type { ThemeSpacingTokens } from './createSpacing';
|
||||
|
||||
interface MenuComponentTokens {
|
||||
borderRadius: keyof Radii;
|
||||
padding: ThemeSpacingTokens;
|
||||
}
|
||||
|
||||
/** @beta */
|
||||
export interface ThemeComponents {
|
||||
|
@ -53,6 +60,7 @@ export interface ThemeComponents {
|
|||
rowHoverBackground: string;
|
||||
rowSelected: string;
|
||||
};
|
||||
menu: MenuComponentTokens;
|
||||
}
|
||||
|
||||
export function createComponents(colors: ThemeColors, shadows: ThemeShadows): ThemeComponents {
|
||||
|
@ -71,6 +79,11 @@ export function createComponents(colors: ThemeColors, shadows: ThemeShadows): Th
|
|||
background: colors.mode === 'dark' ? colors.background.canvas : colors.background.primary,
|
||||
};
|
||||
|
||||
const menu: MenuComponentTokens = {
|
||||
borderRadius: 'default',
|
||||
padding: 0.5,
|
||||
};
|
||||
|
||||
return {
|
||||
height: {
|
||||
sm: 3,
|
||||
|
@ -114,5 +127,6 @@ export function createComponents(colors: ThemeColors, shadows: ThemeShadows): Th
|
|||
rowHoverBackground: colors.action.hover,
|
||||
rowSelected: colors.action.selected,
|
||||
},
|
||||
menu,
|
||||
};
|
||||
}
|
||||
|
|
|
@ -165,6 +165,8 @@ export type PluginExtensionOpenModalOptions = {
|
|||
|
||||
export type PluginExtensionEventHelpers<Context extends object = object> = {
|
||||
context?: Readonly<Context>;
|
||||
// The ID of the extension point that triggered this event
|
||||
extensionPointId: string;
|
||||
// Opens a modal dialog and renders the provided React component inside it
|
||||
openModal: (options: PluginExtensionOpenModalOptions) => void;
|
||||
/**
|
||||
|
|
|
@ -58,6 +58,9 @@
|
|||
"@grafana/faro-web-sdk": "^1.13.2",
|
||||
"@grafana/schema": "12.3.0-pre",
|
||||
"@grafana/ui": "12.3.0-pre",
|
||||
"@openfeature/core": "^1.9.0",
|
||||
"@openfeature/ofrep-web-provider": "^0.3.3",
|
||||
"@openfeature/web-sdk": "^1.6.1",
|
||||
"@types/systemjs": "6.15.3",
|
||||
"history": "4.10.1",
|
||||
"lodash": "4.17.21",
|
||||
|
|
|
@ -27,3 +27,5 @@ export {
|
|||
} from '../services/pluginExtensions/getObservablePluginLinks';
|
||||
|
||||
export { UserStorage } from '../utils/userStorage';
|
||||
|
||||
export { initOpenFeature, evaluateBooleanFlag } from './openFeature';
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
import { OFREPWebProvider } from '@openfeature/ofrep-web-provider';
|
||||
import { OpenFeature } from '@openfeature/web-sdk';
|
||||
|
||||
import { FeatureToggles } from '@grafana/data';
|
||||
|
||||
import { config } from '../../config';
|
||||
|
||||
export type FeatureFlagName = keyof FeatureToggles;
|
||||
|
||||
export async function initOpenFeature() {
|
||||
/**
|
||||
* Note: Currently we don't have a way to override OpenFeature flags for tests or localStorage.
|
||||
* A few improvements we could make:
|
||||
* - When running in tests (unit or e2e?), we could use InMemoryProvider instead
|
||||
* - Use Multi-Provider to combine InMemoryProvider (for localStorage) with OFREPWebProvider
|
||||
* to allow for overrides https://github.com/open-feature/js-sdk-contrib/tree/main/libs/providers/multi-provider
|
||||
*/
|
||||
|
||||
const ofProvider = new OFREPWebProvider({
|
||||
baseUrl: '/apis/features.grafana.app/v0alpha1/namespaces/' + config.namespace,
|
||||
pollInterval: -1, // disable polling
|
||||
timeoutMs: 5_000,
|
||||
});
|
||||
|
||||
await OpenFeature.setProviderAndWait(ofProvider, {
|
||||
targetingKey: config.namespace,
|
||||
namespace: config.namespace,
|
||||
});
|
||||
}
|
||||
|
||||
export function evaluateBooleanFlag(flagName: FeatureFlagName, defaultValue: boolean): boolean {
|
||||
return OpenFeature.getClient().getBooleanValue(flagName, defaultValue);
|
||||
}
|
|
@ -41,6 +41,7 @@ export interface Options {
|
|||
showCommonLabels: boolean;
|
||||
showControls?: boolean;
|
||||
showLabels: boolean;
|
||||
showLogAttributes?: boolean;
|
||||
showLogContextToggle: boolean;
|
||||
showTime: boolean;
|
||||
sortOrder: common.LogsSortOrder;
|
||||
|
|
|
@ -37,7 +37,7 @@ const listFoldersHandler = () =>
|
|||
const limit = parseInt(url.searchParams.get('limit') ?? '1000', 10);
|
||||
const page = parseInt(url.searchParams.get('page') ?? '1', 10);
|
||||
|
||||
const tree = permission === 'Edit' ? mockTreeThatViewersCanEdit : mockTree;
|
||||
const tree = permission?.toLowerCase() === 'edit' ? mockTreeThatViewersCanEdit : mockTree;
|
||||
|
||||
// reconstruct a folder API response from the flat tree fixture
|
||||
const folders = tree
|
||||
|
|
|
@ -92,7 +92,7 @@ export const CollapsableSection = ({
|
|||
{loading ? (
|
||||
<Spinner className={styles.spinner} />
|
||||
) : (
|
||||
<Icon name={isSectionOpen ? 'angle-up' : 'angle-down'} className={styles.icon} />
|
||||
<Icon name={isSectionOpen ? 'angle-down' : 'angle-right'} className={styles.icon} />
|
||||
)}
|
||||
</button>
|
||||
<div className={styles.label} id={`collapse-label-${id}`} data-testid={headerDataTestId}>
|
||||
|
@ -107,17 +107,18 @@ export const CollapsableSection = ({
|
|||
const collapsableSectionStyles = (theme: GrafanaTheme2) => ({
|
||||
header: css({
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
cursor: 'pointer',
|
||||
boxSizing: 'border-box',
|
||||
flexDirection: 'row-reverse',
|
||||
position: 'relative',
|
||||
justifyContent: 'space-between',
|
||||
justifyContent: 'flex-start',
|
||||
fontSize: theme.typography.size.lg,
|
||||
padding: `${theme.spacing(0.5)} 0`,
|
||||
'&:focus-within': getFocusStyles(theme),
|
||||
}),
|
||||
button: css({
|
||||
all: 'unset',
|
||||
marginRight: theme.spacing(1),
|
||||
'&:focus-visible': {
|
||||
outline: 'none',
|
||||
outlineOffset: 'unset',
|
||||
|
@ -141,6 +142,7 @@ const collapsableSectionStyles = (theme: GrafanaTheme2) => ({
|
|||
}),
|
||||
label: css({
|
||||
display: 'flex',
|
||||
flex: '1 1 auto',
|
||||
fontWeight: theme.typography.fontWeightMedium,
|
||||
color: theme.colors.text.maxContrast,
|
||||
}),
|
||||
|
|
|
@ -14,6 +14,9 @@ export interface ErrorBoundaryApi {
|
|||
}
|
||||
|
||||
interface Props {
|
||||
/** Name of the error boundary. Used when reporting errors in Faro. */
|
||||
boundaryName?: string;
|
||||
|
||||
children: (r: ErrorBoundaryApi) => ReactNode;
|
||||
/** Will re-render children after error if recover values changes */
|
||||
dependencies?: unknown[];
|
||||
|
@ -37,10 +40,15 @@ export class ErrorBoundary extends PureComponent<Props, State> {
|
|||
};
|
||||
|
||||
componentDidCatch(error: Error, errorInfo: ErrorInfo) {
|
||||
const logger = this.props.errorLogger ?? faro?.api?.pushError;
|
||||
|
||||
if (logger) {
|
||||
logger(error);
|
||||
if (this.props.errorLogger) {
|
||||
this.props.errorLogger(error);
|
||||
} else {
|
||||
faro?.api?.pushError(error, {
|
||||
type: 'boundary',
|
||||
context: {
|
||||
source: this.props.boundaryName ?? 'unknown',
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
this.setState({ error, errorInfo });
|
||||
|
@ -85,6 +93,9 @@ export class ErrorBoundary extends PureComponent<Props, State> {
|
|||
* @public
|
||||
*/
|
||||
export interface ErrorBoundaryAlertProps {
|
||||
/** Name of the error boundary. Used when reporting errors in Faro. */
|
||||
boundaryName?: string;
|
||||
|
||||
/** Title for the error boundary alert */
|
||||
title?: string;
|
||||
|
||||
|
@ -107,10 +118,10 @@ export class ErrorBoundaryAlert extends PureComponent<ErrorBoundaryAlertProps> {
|
|||
};
|
||||
|
||||
render() {
|
||||
const { title, children, style, dependencies, errorLogger } = this.props;
|
||||
const { title, children, style, dependencies, errorLogger, boundaryName } = this.props;
|
||||
|
||||
return (
|
||||
<ErrorBoundary dependencies={dependencies} errorLogger={errorLogger}>
|
||||
<ErrorBoundary dependencies={dependencies} errorLogger={errorLogger} boundaryName={boundaryName}>
|
||||
{({ error, errorInfo }) => {
|
||||
if (!errorInfo) {
|
||||
return children;
|
||||
|
|
|
@ -25,6 +25,7 @@ export interface MenuProps extends React.HTMLAttributes<HTMLDivElement> {
|
|||
const MenuComp = React.forwardRef<HTMLDivElement, MenuProps>(
|
||||
({ header, children, ariaLabel, onOpen, onClose, onKeyDown, ...otherProps }, forwardedRef) => {
|
||||
const styles = useStyles2(getStyles);
|
||||
const componentTokens = useComponentTokens();
|
||||
|
||||
const localRef = useRef<HTMLDivElement>(null);
|
||||
useImperativeHandle(forwardedRef, () => localRef.current!);
|
||||
|
@ -36,12 +37,11 @@ const MenuComp = React.forwardRef<HTMLDivElement, MenuProps>(
|
|||
{...otherProps}
|
||||
aria-label={ariaLabel}
|
||||
backgroundColor="elevated"
|
||||
borderRadius="default"
|
||||
borderRadius={componentTokens.borderRadius}
|
||||
boxShadow="z3"
|
||||
display="inline-block"
|
||||
onKeyDown={handleKeys}
|
||||
paddingX={0.5}
|
||||
paddingY={0.5}
|
||||
padding={componentTokens.padding}
|
||||
ref={localRef}
|
||||
role="menu"
|
||||
tabIndex={-1}
|
||||
|
@ -70,6 +70,18 @@ export const Menu = Object.assign(MenuComp, {
|
|||
Group: MenuGroup,
|
||||
});
|
||||
|
||||
const useComponentTokens = () =>
|
||||
useStyles2((theme: GrafanaTheme2) => {
|
||||
const {
|
||||
components: { menu },
|
||||
} = theme;
|
||||
|
||||
return {
|
||||
padding: menu.padding,
|
||||
borderRadius: menu.borderRadius,
|
||||
};
|
||||
});
|
||||
|
||||
const getStyles = (theme: GrafanaTheme2) => {
|
||||
return {
|
||||
header: css({
|
||||
|
|
|
@ -6,7 +6,7 @@ import { GrafanaTheme2, LinkTarget } from '@grafana/data';
|
|||
import { t } from '@grafana/i18n';
|
||||
|
||||
import { useStyles2 } from '../../themes/ThemeContext';
|
||||
import { getFocusStyles } from '../../themes/mixins';
|
||||
import { getFocusStyles, getInternalRadius } from '../../themes/mixins';
|
||||
import { IconName } from '../../types/icon';
|
||||
import { Icon } from '../Icon/Icon';
|
||||
import { Stack } from '../Layout/Stack/Stack';
|
||||
|
@ -213,6 +213,8 @@ export const MenuItem = React.memo(
|
|||
MenuItem.displayName = 'MenuItem';
|
||||
|
||||
const getStyles = (theme: GrafanaTheme2) => {
|
||||
const menuPadding = theme.components.menu.padding * theme.spacing.gridSize;
|
||||
|
||||
return {
|
||||
item: css({
|
||||
background: 'none',
|
||||
|
@ -225,7 +227,7 @@ const getStyles = (theme: GrafanaTheme2) => {
|
|||
justifyContent: 'center',
|
||||
padding: theme.spacing(0.5, 1.5),
|
||||
minHeight: theme.spacing(4),
|
||||
borderRadius: theme.shape.radius.default,
|
||||
borderRadius: getInternalRadius(theme, menuPadding, { parentBorderWidth: 0 }),
|
||||
margin: 0,
|
||||
border: 'none',
|
||||
width: '100%',
|
||||
|
|
|
@ -11,9 +11,7 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
_ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/grafana/grafana/pkg/api"
|
||||
|
@ -21,8 +19,10 @@ import (
|
|||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/infra/metrics"
|
||||
"github.com/grafana/grafana/pkg/infra/process"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/server"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/standalone"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
|
@ -111,6 +111,11 @@ func RunServer(opts standalone.BuildInfo, cli *cli.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Initialize tracing early to ensure it's always available for other services
|
||||
if err := tracing.InitTracing(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := server.Initialize(
|
||||
cli.Context,
|
||||
cfg,
|
||||
|
|
|
@ -57,4 +57,5 @@ import (
|
|||
_ "github.com/grafana/tempo/pkg/traceql"
|
||||
|
||||
_ "github.com/grafana/grafana/apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1"
|
||||
_ "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1"
|
||||
)
|
||||
|
|
|
@ -11,6 +11,8 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/grafana/dskit/services"
|
||||
jaegerpropagator "go.opentelemetry.io/contrib/propagators/jaeger"
|
||||
"go.opentelemetry.io/contrib/samplers/jaegerremote"
|
||||
"go.opentelemetry.io/otel"
|
||||
|
@ -27,11 +29,9 @@ import (
|
|||
"go.opentelemetry.io/otel/trace/noop"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"github.com/go-kit/log/level"
|
||||
|
||||
"github.com/grafana/dskit/services"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/errutil"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -105,6 +105,23 @@ func ProvideService(tracingCfg *TracingConfig) (*TracingService, error) {
|
|||
return ots, nil
|
||||
}
|
||||
|
||||
// InitTracing initializes the tracing service with the provided configuration.
|
||||
// Used to initialize tracing early to ensure it's always available for other
|
||||
// services, outside of the wire context.
|
||||
func InitTracing(cfg *setting.Cfg) error {
|
||||
tracingCfg, err := ParseTracingConfig(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse tracing config: %w", err)
|
||||
}
|
||||
|
||||
_, err = ProvideService(tracingCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initialize tracing: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewNoopTracerService() *TracingService {
|
||||
tp := &noopTracerProvider{TracerProvider: noop.NewTracerProvider()}
|
||||
otel.SetTracerProvider(tp)
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
package preferences
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
|
||||
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
|
||||
)
|
||||
|
||||
func (b *APIBuilder) Mutate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) {
|
||||
switch a.GetOperation() {
|
||||
case admission.Create, admission.Update:
|
||||
// ignore anything that is not CREATE | UPDATE
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
obj := a.GetObject()
|
||||
if obj == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch a.GetResource().Resource {
|
||||
case "stars":
|
||||
stars, ok := obj.(*preferences.Stars)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected stars object: (%T)", obj)
|
||||
}
|
||||
stars.Spec.Normalize()
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -26,21 +26,29 @@ func mustTemplate(filename string) *template.Template {
|
|||
|
||||
// Templates.
|
||||
var (
|
||||
sqlStarsQuery = mustTemplate("sql_stars_query.sql")
|
||||
sqlStarsRV = mustTemplate("sql_stars_rv.sql")
|
||||
sqlPreferencesQuery = mustTemplate("sql_preferences_query.sql")
|
||||
sqlPreferencesRV = mustTemplate("sql_preferences_rv.sql")
|
||||
sqlTeams = mustTemplate("sql_teams.sql")
|
||||
sqlDashboardStarsQuery = mustTemplate("sql_dashboard_stars.sql")
|
||||
sqlDashboardStarsRV = mustTemplate("sql_dashboard_stars_rv.sql")
|
||||
sqlHistoryStarsQuery = mustTemplate("sql_history_stars.sql")
|
||||
sqlHistoryStarsInsert = mustTemplate("sql_history_stars_insert.sql")
|
||||
sqlHistoryStarsDelete = mustTemplate("sql_history_stars_delete.sql")
|
||||
sqlPreferencesQuery = mustTemplate("sql_preferences_query.sql")
|
||||
sqlPreferencesRV = mustTemplate("sql_preferences_rv.sql")
|
||||
sqlTeams = mustTemplate("sql_teams.sql")
|
||||
)
|
||||
|
||||
type starQuery struct {
|
||||
sqltemplate.SQLTemplate
|
||||
|
||||
OrgID int64 // >= 1 if UserID != ""
|
||||
UserUID string
|
||||
OrgID int64 // >= 1 if UserID != ""
|
||||
UserUID string
|
||||
UserID int64 // for stars
|
||||
QueryUIDs []string
|
||||
QueryUID string
|
||||
|
||||
StarTable string
|
||||
UserTable string
|
||||
StarTable string
|
||||
UserTable string
|
||||
QueryHistoryStarsTable string
|
||||
QueryHistoryTable string
|
||||
}
|
||||
|
||||
func (r starQuery) Validate() error {
|
||||
|
@ -57,8 +65,10 @@ func newStarQueryReq(sql *legacysql.LegacyDatabaseHelper, user string, orgId int
|
|||
UserUID: user,
|
||||
OrgID: orgId,
|
||||
|
||||
StarTable: sql.Table("star"),
|
||||
UserTable: sql.Table("user"),
|
||||
StarTable: sql.Table("star"),
|
||||
UserTable: sql.Table("user"),
|
||||
QueryHistoryStarsTable: sql.Table("query_history_star"),
|
||||
QueryHistoryTable: sql.Table("query_history"),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -23,6 +23,15 @@ func TestStarsQueries(t *testing.T) {
|
|||
return &v
|
||||
}
|
||||
|
||||
getHistoryReq := func(orgId int64, userId int64, stars []string, star string) sqltemplate.SQLTemplate {
|
||||
v := newStarQueryReq(nodb, "", orgId)
|
||||
v.UserID = userId
|
||||
v.QueryUIDs = stars
|
||||
v.QueryUID = star
|
||||
v.SQLTemplate = mocks.NewTestingSQLTemplate()
|
||||
return &v
|
||||
}
|
||||
|
||||
getPreferencesQuery := func(orgId int64, cb func(q *preferencesQuery)) sqltemplate.SQLTemplate {
|
||||
v := newPreferencesQueryReq(nodb, orgId)
|
||||
v.SQLTemplate = mocks.NewTestingSQLTemplate()
|
||||
|
@ -40,7 +49,7 @@ func TestStarsQueries(t *testing.T) {
|
|||
RootDir: "testdata",
|
||||
SQLTemplatesFS: sqlTemplatesFS,
|
||||
Templates: map[*template.Template][]mocks.TemplateTestCase{
|
||||
sqlStarsQuery: {
|
||||
sqlDashboardStarsQuery: {
|
||||
{
|
||||
Name: "all",
|
||||
Data: getStarQuery(0, ""),
|
||||
|
@ -54,12 +63,42 @@ func TestStarsQueries(t *testing.T) {
|
|||
Data: getStarQuery(3, "abc"),
|
||||
},
|
||||
},
|
||||
sqlStarsRV: {
|
||||
sqlDashboardStarsRV: {
|
||||
{
|
||||
Name: "get",
|
||||
Data: getStarQuery(0, ""),
|
||||
},
|
||||
},
|
||||
sqlHistoryStarsQuery: {
|
||||
{
|
||||
Name: "user",
|
||||
Data: getStarQuery(1, "abc"),
|
||||
},
|
||||
},
|
||||
sqlHistoryStarsQuery: {
|
||||
{
|
||||
Name: "org",
|
||||
Data: getStarQuery(1, ""),
|
||||
},
|
||||
},
|
||||
sqlHistoryStarsInsert: {
|
||||
{
|
||||
Name: "add star",
|
||||
Data: getHistoryReq(1, 3, nil, "XXX"),
|
||||
},
|
||||
},
|
||||
sqlHistoryStarsDelete: {
|
||||
{
|
||||
Name: "remove star",
|
||||
Data: getHistoryReq(1, 3, []string{"xxx", "yyy"}, ""),
|
||||
},
|
||||
},
|
||||
sqlHistoryStarsDelete: {
|
||||
{
|
||||
Name: "remove all star",
|
||||
Data: getHistoryReq(1, 3, nil, ""),
|
||||
},
|
||||
},
|
||||
sqlPreferencesQuery: {
|
||||
{
|
||||
Name: "all",
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
pref "github.com/grafana/grafana/pkg/services/preference"
|
||||
"github.com/grafana/grafana/pkg/services/user"
|
||||
"github.com/grafana/grafana/pkg/storage/legacysql"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
|
||||
)
|
||||
|
@ -58,13 +59,16 @@ func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user str
|
|||
|
||||
req := newStarQueryReq(sql, user, orgId)
|
||||
|
||||
q, err := sqltemplate.Execute(sqlStarsQuery, req)
|
||||
q, err := sqltemplate.Execute(sqlDashboardStarsQuery, req)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("execute template %q: %w", sqlStarsQuery.Name(), err)
|
||||
return nil, 0, fmt.Errorf("execute template %q: %w", sqlDashboardStarsQuery.Name(), err)
|
||||
}
|
||||
|
||||
sess := sql.DB.GetSqlxSession()
|
||||
rows, err := sess.Query(ctx, q, req.GetArgs()...)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer func() {
|
||||
if rows != nil {
|
||||
_ = rows.Close()
|
||||
|
@ -111,7 +115,7 @@ func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user str
|
|||
// Find the RV unless it is a user query
|
||||
if userUID == "" {
|
||||
req.Reset()
|
||||
q, err = sqltemplate.Execute(sqlStarsRV, req)
|
||||
q, err = sqltemplate.Execute(sqlDashboardStarsRV, req)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("execute template %q: %w", sqlPreferencesRV.Name(), err)
|
||||
}
|
||||
|
@ -132,6 +136,90 @@ func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user str
|
|||
return stars, updated.UnixMilli(), err
|
||||
}
|
||||
|
||||
func (s *LegacySQL) getHistoryStars(ctx context.Context, orgId int64, user string) (map[string][]string, error) {
|
||||
sql, err := s.db(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req := newStarQueryReq(sql, user, orgId)
|
||||
|
||||
q, err := sqltemplate.Execute(sqlHistoryStarsQuery, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("execute template %q: %w", sqlHistoryStarsQuery.Name(), err)
|
||||
}
|
||||
|
||||
sess := sql.DB.GetSqlxSession()
|
||||
rows, err := sess.Query(ctx, q, req.GetArgs()...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if rows != nil {
|
||||
_ = rows.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
last := user
|
||||
res := make(map[string][]string)
|
||||
buffer := make([]string, 0, 10)
|
||||
var uid string
|
||||
|
||||
for rows.Next() {
|
||||
err := rows.Scan(&uid, &user)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if user != last && len(buffer) > 0 {
|
||||
res[last] = buffer
|
||||
buffer = make([]string, 0, 10)
|
||||
}
|
||||
buffer = append(buffer, uid)
|
||||
last = user
|
||||
}
|
||||
res[last] = buffer
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *LegacySQL) removeHistoryStar(ctx context.Context, user *user.User, stars []string) error {
|
||||
sql, err := s.db(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := newStarQueryReq(sql, "", user.OrgID)
|
||||
req.UserID = user.ID
|
||||
if len(stars) > 0 {
|
||||
req.QueryUIDs = stars
|
||||
}
|
||||
|
||||
q, err := sqltemplate.Execute(sqlHistoryStarsDelete, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("execute template %q: %w", sqlHistoryStarsDelete.Name(), err)
|
||||
}
|
||||
|
||||
sess := sql.DB.GetSqlxSession()
|
||||
_, err = sess.Exec(ctx, q, req.GetArgs()...)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *LegacySQL) addHistoryStar(ctx context.Context, user *user.User, star string) error {
|
||||
sql, err := s.db(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := newStarQueryReq(sql, "", user.OrgID)
|
||||
req.UserID = user.ID
|
||||
req.QueryUID = star
|
||||
|
||||
q, err := sqltemplate.Execute(sqlHistoryStarsDelete, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("execute template %q: %w", sqlHistoryStarsDelete.Name(), err)
|
||||
}
|
||||
|
||||
sess := sql.DB.GetSqlxSession()
|
||||
_, err = sess.Exec(ctx, q, req.GetArgs()...)
|
||||
return err
|
||||
}
|
||||
|
||||
// List all defined preferences in an org (valid for admin users only)
|
||||
func (s *LegacySQL) listPreferences(ctx context.Context,
|
||||
ns string, orgId int64,
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
SELECT s.query_uid, u.uid as user_uid
|
||||
FROM {{ .Ident .QueryHistoryStarsTable }} as s
|
||||
JOIN {{ .Ident .QueryHistoryTable }} as h ON s.query_uid = h.uid
|
||||
JOIN {{ .Ident .UserTable }} as u ON s.user_id = u.id
|
||||
WHERE s.org_id = {{ .Arg .OrgID }}
|
||||
{{ if .UserUID }}
|
||||
AND u.uid = {{ .Arg .UserUID }}
|
||||
{{ end }}
|
||||
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc
|
|
@ -0,0 +1,6 @@
|
|||
DELETE FROM {{ .Ident .QueryHistoryStarsTable }}
|
||||
WHERE org_id = {{ .Arg .OrgID }}
|
||||
AND user_id = {{ .Arg .UserID }}
|
||||
{{ if .QueryUIDs }}
|
||||
AND query_uid IN ({{ .ArgList .QueryUIDs }})
|
||||
{{ end }}
|
|
@ -0,0 +1,4 @@
|
|||
INSERT INTO {{ .Ident .QueryHistoryStarsTable }}
|
||||
( query_uid, user_id, org_id )
|
||||
VALUES
|
||||
( {{ .Arg .QueryUID }}, {{ .Arg .UserID }}, {{ .Arg .OrgID }} )
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -12,6 +13,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
|
@ -107,8 +109,13 @@ func (s *DashboardStarsStorage) List(ctx context.Context, options *internalversi
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
history, err := s.sql.getHistoryStars(ctx, ns.OrgID, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, v := range found {
|
||||
list.Items = append(list.Items, asStarsResource(s.namespacer(v.OrgID), &v))
|
||||
list.Items = append(list.Items,
|
||||
asStarsResource(s.namespacer(v.OrgID), &v, history[v.UserUID]))
|
||||
}
|
||||
if rv > 0 {
|
||||
list.ResourceVersion = strconv.FormatInt(rv, 10)
|
||||
|
@ -141,19 +148,25 @@ func (s *DashboardStarsStorage) Get(ctx context.Context, name string, options *m
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
history, err := s.sql.getHistoryStars(ctx, ns.OrgID, owner.Identifier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(found) == 0 || len(found[0].Dashboards) == 0 {
|
||||
return nil, apiserrors.NewNotFound(preferences.StarsResourceInfo.GroupResource(), name)
|
||||
}
|
||||
obj := asStarsResource(ns.Value, &found[0])
|
||||
obj := asStarsResource(ns.Value, &found[0], history[owner.Identifier])
|
||||
return &obj, nil
|
||||
}
|
||||
|
||||
func getDashboardStars(stars *preferences.Stars) []string {
|
||||
func getStars(stars *preferences.Stars, gk schema.GroupKind) []string {
|
||||
if stars == nil || len(stars.Spec.Resource) == 0 {
|
||||
return []string{}
|
||||
}
|
||||
for _, r := range stars.Spec.Resource {
|
||||
if r.Group == "dashboard.grafana.app" && r.Kind == "Dashboard" {
|
||||
if r.Group == gk.Group && r.Kind == gk.Kind {
|
||||
return r.Names
|
||||
}
|
||||
}
|
||||
|
@ -161,7 +174,7 @@ func getDashboardStars(stars *preferences.Stars) []string {
|
|||
}
|
||||
|
||||
// Create implements rest.Creater.
|
||||
func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Stars, old *preferences.Stars) (runtime.Object, error) {
|
||||
func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Stars) (runtime.Object, error) {
|
||||
ns, owner, err := getNamespaceAndOwner(ctx, obj.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -177,7 +190,7 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Star
|
|||
return nil, fmt.Errorf("namespace mismatch")
|
||||
}
|
||||
|
||||
stars := getDashboardStars(obj)
|
||||
stars := getStars(obj, schema.GroupKind{Group: "dashboard.grafana.app", Kind: "Dashboard"})
|
||||
if len(stars) == 0 {
|
||||
err = s.stars.DeleteByUser(ctx, user.ID)
|
||||
return &preferences.Stars{ObjectMeta: metav1.ObjectMeta{
|
||||
|
@ -232,6 +245,31 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Star
|
|||
changed = true
|
||||
}
|
||||
|
||||
// Apply history stars
|
||||
stars = getStars(obj, schema.GroupKind{Group: "history.grafana.app", Kind: "Query"})
|
||||
res, err := s.sql.getHistoryStars(ctx, user.OrgID, user.UID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
history := res[user.UID]
|
||||
if !slices.Equal(stars, history) {
|
||||
changed = true
|
||||
if len(stars) == 0 {
|
||||
err = s.sql.removeHistoryStar(ctx, user, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
added, removed, _ := preferences.Changes(history, stars)
|
||||
if len(removed) > 0 {
|
||||
_ = s.sql.removeHistoryStar(ctx, user, nil)
|
||||
}
|
||||
for _, v := range added {
|
||||
_ = s.sql.addHistoryStar(ctx, user, v) // one at a time so duplicates do not fail everything
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if changed {
|
||||
return s.Get(ctx, obj.Name, &metav1.GetOptions{})
|
||||
}
|
||||
|
@ -245,7 +283,7 @@ func (s *DashboardStarsStorage) Create(ctx context.Context, obj runtime.Object,
|
|||
return nil, fmt.Errorf("expected stars object")
|
||||
}
|
||||
|
||||
return s.write(ctx, stars, nil)
|
||||
return s.write(ctx, stars)
|
||||
}
|
||||
|
||||
// Update implements rest.Updater.
|
||||
|
@ -265,13 +303,13 @@ func (s *DashboardStarsStorage) Update(ctx context.Context, name string, objInfo
|
|||
return nil, false, fmt.Errorf("expected stars object")
|
||||
}
|
||||
|
||||
obj, err = s.write(ctx, stars, old.(*preferences.Stars))
|
||||
obj, err = s.write(ctx, stars)
|
||||
return obj, false, err
|
||||
}
|
||||
|
||||
// Delete implements rest.GracefulDeleter.
|
||||
func (s *DashboardStarsStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
|
||||
obj, err := s.write(ctx, &preferences.Stars{ObjectMeta: metav1.ObjectMeta{Name: name}}, nil)
|
||||
obj, err := s.write(ctx, &preferences.Stars{ObjectMeta: metav1.ObjectMeta{Name: name}})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
@ -283,8 +321,8 @@ func (s *DashboardStarsStorage) DeleteCollection(ctx context.Context, deleteVali
|
|||
return nil, fmt.Errorf("not implemented yet")
|
||||
}
|
||||
|
||||
func asStarsResource(ns string, v *dashboardStars) preferences.Stars {
|
||||
return preferences.Stars{
|
||||
func asStarsResource(ns string, v *dashboardStars, history []string) preferences.Stars {
|
||||
stars := preferences.Stars{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("user-%s", v.UserUID),
|
||||
Namespace: ns,
|
||||
|
@ -299,4 +337,13 @@ func asStarsResource(ns string, v *dashboardStars) preferences.Stars {
|
|||
}},
|
||||
},
|
||||
}
|
||||
if len(history) > 0 {
|
||||
stars.Spec.Resource = append(stars.Spec.Resource, preferences.StarsResource{
|
||||
Group: "history.grafana.app",
|
||||
Kind: "Query",
|
||||
Names: history,
|
||||
})
|
||||
}
|
||||
stars.Spec.Normalize()
|
||||
return stars
|
||||
}
|
||||
|
|
6
pkg/registry/apis/preferences/legacy/testdata/mysql--sql_history_stars-org.sql
vendored
Executable file
6
pkg/registry/apis/preferences/legacy/testdata/mysql--sql_history_stars-org.sql
vendored
Executable file
|
@ -0,0 +1,6 @@
|
|||
SELECT s.query_uid, u.uid as user_uid
|
||||
FROM `grafana`.`query_history_star` as s
|
||||
JOIN `grafana`.`query_history` as h ON s.query_uid = h.uid
|
||||
JOIN `grafana`.`user` as u ON s.user_id = u.id
|
||||
WHERE s.org_id = 1
|
||||
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc
|
3
pkg/registry/apis/preferences/legacy/testdata/mysql--sql_history_stars_delete-remove all star.sql
vendored
Executable file
3
pkg/registry/apis/preferences/legacy/testdata/mysql--sql_history_stars_delete-remove all star.sql
vendored
Executable file
|
@ -0,0 +1,3 @@
|
|||
DELETE FROM `grafana`.`query_history_star`
|
||||
WHERE org_id = 1
|
||||
AND user_id = 3
|
4
pkg/registry/apis/preferences/legacy/testdata/mysql--sql_history_stars_insert-add star.sql
vendored
Executable file
4
pkg/registry/apis/preferences/legacy/testdata/mysql--sql_history_stars_insert-add star.sql
vendored
Executable file
|
@ -0,0 +1,4 @@
|
|||
INSERT INTO `grafana`.`query_history_star`
|
||||
( query_uid, user_id, org_id )
|
||||
VALUES
|
||||
( 'XXX', 3, 1 )
|
6
pkg/registry/apis/preferences/legacy/testdata/postgres--sql_history_stars-org.sql
vendored
Executable file
6
pkg/registry/apis/preferences/legacy/testdata/postgres--sql_history_stars-org.sql
vendored
Executable file
|
@ -0,0 +1,6 @@
|
|||
SELECT s.query_uid, u.uid as user_uid
|
||||
FROM "grafana"."query_history_star" as s
|
||||
JOIN "grafana"."query_history" as h ON s.query_uid = h.uid
|
||||
JOIN "grafana"."user" as u ON s.user_id = u.id
|
||||
WHERE s.org_id = 1
|
||||
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc
|
3
pkg/registry/apis/preferences/legacy/testdata/postgres--sql_history_stars_delete-remove all star.sql
vendored
Executable file
3
pkg/registry/apis/preferences/legacy/testdata/postgres--sql_history_stars_delete-remove all star.sql
vendored
Executable file
|
@ -0,0 +1,3 @@
|
|||
DELETE FROM "grafana"."query_history_star"
|
||||
WHERE org_id = 1
|
||||
AND user_id = 3
|
4
pkg/registry/apis/preferences/legacy/testdata/postgres--sql_history_stars_insert-add star.sql
vendored
Executable file
4
pkg/registry/apis/preferences/legacy/testdata/postgres--sql_history_stars_insert-add star.sql
vendored
Executable file
|
@ -0,0 +1,4 @@
|
|||
INSERT INTO "grafana"."query_history_star"
|
||||
( query_uid, user_id, org_id )
|
||||
VALUES
|
||||
( 'XXX', 3, 1 )
|
6
pkg/registry/apis/preferences/legacy/testdata/sqlite--sql_history_stars-org.sql
vendored
Executable file
6
pkg/registry/apis/preferences/legacy/testdata/sqlite--sql_history_stars-org.sql
vendored
Executable file
|
@ -0,0 +1,6 @@
|
|||
SELECT s.query_uid, u.uid as user_uid
|
||||
FROM "grafana"."query_history_star" as s
|
||||
JOIN "grafana"."query_history" as h ON s.query_uid = h.uid
|
||||
JOIN "grafana"."user" as u ON s.user_id = u.id
|
||||
WHERE s.org_id = 1
|
||||
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc
|
3
pkg/registry/apis/preferences/legacy/testdata/sqlite--sql_history_stars_delete-remove all star.sql
vendored
Executable file
3
pkg/registry/apis/preferences/legacy/testdata/sqlite--sql_history_stars_delete-remove all star.sql
vendored
Executable file
|
@ -0,0 +1,3 @@
|
|||
DELETE FROM "grafana"."query_history_star"
|
||||
WHERE org_id = 1
|
||||
AND user_id = 3
|
4
pkg/registry/apis/preferences/legacy/testdata/sqlite--sql_history_stars_insert-add star.sql
vendored
Executable file
4
pkg/registry/apis/preferences/legacy/testdata/sqlite--sql_history_stars_insert-add star.sql
vendored
Executable file
|
@ -0,0 +1,4 @@
|
|||
INSERT INTO "grafana"."query_history_star"
|
||||
( query_uid, user_id, org_id )
|
||||
VALUES
|
||||
( 'XXX', 3, 1 )
|
|
@ -29,7 +29,10 @@ import (
|
|||
"github.com/grafana/grafana/pkg/storage/legacysql"
|
||||
)
|
||||
|
||||
var _ builder.APIGroupBuilder = (*APIBuilder)(nil)
|
||||
var (
|
||||
_ builder.APIGroupBuilder = (*APIBuilder)(nil)
|
||||
_ builder.APIGroupMutation = (*APIBuilder)(nil)
|
||||
)
|
||||
|
||||
type APIBuilder struct {
|
||||
authorizer authorizer.Authorizer
|
||||
|
@ -112,7 +115,7 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stars = &starStorage{store: stars} // wrap List so we only return one value
|
||||
stars = &starStorage{Storage: stars} // wrap List so we only return one value
|
||||
if b.legacyStars != nil && opts.DualWriteBuilder != nil {
|
||||
stars, err = opts.DualWriteBuilder(resource.GroupResource(), b.legacyStars, stars)
|
||||
if err != nil {
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
|
||||
|
@ -17,7 +16,7 @@ import (
|
|||
var _ grafanarest.Storage = (*starStorage)(nil)
|
||||
|
||||
type starStorage struct {
|
||||
store grafanarest.Storage
|
||||
grafanarest.Storage
|
||||
}
|
||||
|
||||
// When using list, we really just want to get the value for the single user
|
||||
|
@ -34,7 +33,7 @@ func (s *starStorage) List(ctx context.Context, options *internalversion.ListOpt
|
|||
// Get the single user stars
|
||||
case authlib.TypeUser:
|
||||
stars := &preferences.StarsList{}
|
||||
obj, _ := s.store.Get(ctx, "user-"+user.GetIdentifier(), &v1.GetOptions{})
|
||||
obj, _ := s.Get(ctx, "user-"+user.GetIdentifier(), &v1.GetOptions{})
|
||||
if obj != nil {
|
||||
s, ok := obj.(*preferences.Stars)
|
||||
if ok {
|
||||
|
@ -44,61 +43,6 @@ func (s *starStorage) List(ctx context.Context, options *internalversion.ListOpt
|
|||
return stars, nil
|
||||
|
||||
default:
|
||||
return s.store.List(ctx, options)
|
||||
return s.Storage.List(ctx, options)
|
||||
}
|
||||
}
|
||||
|
||||
// ConvertToTable implements rest.Storage.
|
||||
func (s *starStorage) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*v1.Table, error) {
|
||||
return s.store.ConvertToTable(ctx, obj, tableOptions)
|
||||
}
|
||||
|
||||
// Create implements rest.Storage.
|
||||
func (s *starStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *v1.CreateOptions) (runtime.Object, error) {
|
||||
return s.store.Create(ctx, obj, createValidation, options)
|
||||
}
|
||||
|
||||
// Delete implements rest.Storage.
|
||||
func (s *starStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *v1.DeleteOptions) (runtime.Object, bool, error) {
|
||||
return s.store.Delete(ctx, name, deleteValidation, options)
|
||||
}
|
||||
|
||||
// DeleteCollection implements rest.Storage.
|
||||
func (s *starStorage) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *v1.DeleteOptions, listOptions *internalversion.ListOptions) (runtime.Object, error) {
|
||||
return s.store.DeleteCollection(ctx, deleteValidation, options, listOptions)
|
||||
}
|
||||
|
||||
// Destroy implements rest.Storage.
|
||||
func (s *starStorage) Destroy() {
|
||||
s.store.Destroy()
|
||||
}
|
||||
|
||||
// Get implements rest.Storage.
|
||||
func (s *starStorage) Get(ctx context.Context, name string, options *v1.GetOptions) (runtime.Object, error) {
|
||||
return s.store.Get(ctx, name, options)
|
||||
}
|
||||
|
||||
// GetSingularName implements rest.Storage.
|
||||
func (s *starStorage) GetSingularName() string {
|
||||
return s.store.GetSingularName()
|
||||
}
|
||||
|
||||
// NamespaceScoped implements rest.Storage.
|
||||
func (s *starStorage) NamespaceScoped() bool {
|
||||
return s.store.NamespaceScoped()
|
||||
}
|
||||
|
||||
// New implements rest.Storage.
|
||||
func (s *starStorage) New() runtime.Object {
|
||||
return s.store.New()
|
||||
}
|
||||
|
||||
// NewList implements rest.Storage.
|
||||
func (s *starStorage) NewList() runtime.Object {
|
||||
return s.store.NewList()
|
||||
}
|
||||
|
||||
// Update implements rest.Storage.
|
||||
func (s *starStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *v1.UpdateOptions) (runtime.Object, bool, error) {
|
||||
return s.store.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
@ -110,11 +109,10 @@ func (r *starsREST) Connect(ctx context.Context, name string, _ runtime.Object,
|
|||
return
|
||||
}
|
||||
|
||||
if !apply(&obj.Spec, item, remove) {
|
||||
responder.Object(http.StatusNoContent, &v1.Status{
|
||||
Code: http.StatusNoContent,
|
||||
})
|
||||
return
|
||||
if remove {
|
||||
obj.Spec.Remove(item.group, item.kind, item.id)
|
||||
} else {
|
||||
obj.Spec.Add(item.group, item.kind, item.id)
|
||||
}
|
||||
|
||||
if len(obj.Spec.Resource) == 0 {
|
||||
|
@ -128,9 +126,7 @@ func (r *starsREST) Connect(ctx context.Context, name string, _ runtime.Object,
|
|||
responder.Error(err)
|
||||
return
|
||||
}
|
||||
responder.Object(http.StatusOK, &v1.Status{
|
||||
Code: http.StatusOK,
|
||||
})
|
||||
responder.Object(http.StatusOK, &v1.Status{Code: http.StatusOK})
|
||||
}), nil
|
||||
}
|
||||
|
||||
|
@ -151,49 +147,3 @@ func itemFromPath(urlPath, prefix string) (starItem, error) {
|
|||
id: parts[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func apply(spec *preferences.StarsSpec, item starItem, remove bool) bool {
|
||||
var stars *preferences.StarsResource
|
||||
for idx, v := range spec.Resource {
|
||||
if v.Group == item.group && v.Kind == item.kind {
|
||||
stars = &spec.Resource[idx]
|
||||
}
|
||||
}
|
||||
if stars == nil {
|
||||
if remove {
|
||||
return false
|
||||
}
|
||||
spec.Resource = append(spec.Resource, preferences.StarsResource{
|
||||
Group: item.group,
|
||||
Kind: item.kind,
|
||||
Names: []string{},
|
||||
})
|
||||
stars = &spec.Resource[len(spec.Resource)-1]
|
||||
}
|
||||
|
||||
idx := slices.Index(stars.Names, item.id)
|
||||
if idx < 0 { // not found
|
||||
if remove {
|
||||
return false
|
||||
}
|
||||
stars.Names = append(stars.Names, item.id)
|
||||
} else if remove {
|
||||
stars.Names = append(stars.Names[:idx], stars.Names[idx+1:]...)
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
slices.Sort(stars.Names)
|
||||
|
||||
// Remove the slot if only one value
|
||||
if len(stars.Names) == 0 {
|
||||
tmp := preferences.StarsSpec{}
|
||||
for _, v := range spec.Resource {
|
||||
if v.Group == item.group && v.Kind == item.kind {
|
||||
continue
|
||||
}
|
||||
tmp.Resource = append(tmp.Resource, v)
|
||||
}
|
||||
spec.Resource = tmp.Resource
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -4,194 +4,9 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
|
||||
)
|
||||
|
||||
func TestStarsWrite(t *testing.T) {
|
||||
t.Run("apply", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
spec preferences.StarsSpec
|
||||
item starItem
|
||||
remove bool
|
||||
changed bool
|
||||
expect preferences.StarsSpec
|
||||
}{{
|
||||
name: "add to an existing array",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "x",
|
||||
},
|
||||
remove: false,
|
||||
changed: true,
|
||||
expect: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c", "x"}, // added "x"
|
||||
}},
|
||||
},
|
||||
}, {
|
||||
name: "remove from an existing array",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "b",
|
||||
},
|
||||
remove: true,
|
||||
changed: true,
|
||||
expect: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "c"}, // removed "b"
|
||||
}},
|
||||
},
|
||||
}, {
|
||||
name: "add to empty spec",
|
||||
spec: preferences.StarsSpec{},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "a",
|
||||
},
|
||||
remove: false,
|
||||
changed: true,
|
||||
expect: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a"},
|
||||
}},
|
||||
},
|
||||
}, {
|
||||
name: "remove item that does not exist",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"x"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "a",
|
||||
},
|
||||
remove: true,
|
||||
changed: false,
|
||||
}, {
|
||||
name: "add item that already exist",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"x"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "x",
|
||||
},
|
||||
remove: false,
|
||||
changed: false,
|
||||
}, {
|
||||
name: "remove from empty",
|
||||
spec: preferences.StarsSpec{},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "a",
|
||||
},
|
||||
remove: true,
|
||||
changed: false,
|
||||
}, {
|
||||
name: "remove item that does not exist",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a", "b", "c"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "X",
|
||||
},
|
||||
remove: true,
|
||||
changed: false,
|
||||
}, {
|
||||
name: "remove last item",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a"},
|
||||
}},
|
||||
},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "a",
|
||||
},
|
||||
remove: true,
|
||||
changed: true,
|
||||
expect: preferences.StarsSpec{},
|
||||
}, {
|
||||
name: "remove last item (with others)",
|
||||
spec: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g",
|
||||
Kind: "k",
|
||||
Names: []string{"a"},
|
||||
}, {
|
||||
Group: "g2",
|
||||
Kind: "k2",
|
||||
Names: []string{"a"},
|
||||
}}},
|
||||
item: starItem{
|
||||
group: "g",
|
||||
kind: "k",
|
||||
id: "a",
|
||||
},
|
||||
remove: true,
|
||||
changed: true,
|
||||
expect: preferences.StarsSpec{
|
||||
Resource: []preferences.StarsResource{{
|
||||
Group: "g2",
|
||||
Kind: "k2",
|
||||
Names: []string{"a"},
|
||||
}}},
|
||||
}}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
changed := apply(&tt.spec, tt.item, tt.remove)
|
||||
require.Equal(t, tt.changed, changed)
|
||||
if changed {
|
||||
require.Equal(t, tt.expect, tt.spec)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("path", func(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
|
|
@ -585,7 +585,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
|||
ossProvider := guardian.ProvideGuardian()
|
||||
cacheServiceImpl := service9.ProvideCacheService(cacheService, sqlStore, ossProvider)
|
||||
shortURLService := shorturlimpl.ProvideService(sqlStore)
|
||||
queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl)
|
||||
queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl, featureToggles, eventualRestConfigProvider)
|
||||
dashboardService := service7.ProvideDashboardService(featureToggles, dashboardServiceImpl)
|
||||
dashverService := dashverimpl.ProvideService(cfg, sqlStore, dashboardService, featureToggles, k8sHandlerWithFallback)
|
||||
dashboardSnapshotStore := database5.ProvideStore(sqlStore, cfg)
|
||||
|
@ -1182,7 +1182,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
|||
return nil, err
|
||||
}
|
||||
shortURLService := shorturlimpl.ProvideService(sqlStore)
|
||||
queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl)
|
||||
queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl, featureToggles, eventualRestConfigProvider)
|
||||
dashboardService := service7.ProvideDashboardService(featureToggles, dashboardServiceImpl)
|
||||
dashverService := dashverimpl.ProvideService(cfg, sqlStore, dashboardService, featureToggles, k8sHandlerWithFallback)
|
||||
dashboardSnapshotStore := database5.ProvideStore(sqlStore, cfg)
|
||||
|
|
|
@ -56,6 +56,8 @@ func (s *store) Get(ctx context.Context, ID int64) (*auth.ExternalSession, error
|
|||
return externalSession, nil
|
||||
}
|
||||
|
||||
// List returns a list of external sessions that match the given query.
|
||||
// If the result set contains more than one entry, the entries are sorted by ID in descending order.
|
||||
func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery) ([]*auth.ExternalSession, error) {
|
||||
ctx, span := s.tracer.Start(ctx, "externalsession.List")
|
||||
defer span.End()
|
||||
|
@ -65,6 +67,10 @@ func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery)
|
|||
externalSession.ID = query.ID
|
||||
}
|
||||
|
||||
if query.UserID != 0 {
|
||||
externalSession.UserID = query.UserID
|
||||
}
|
||||
|
||||
hash := sha256.New()
|
||||
|
||||
if query.SessionID != "" {
|
||||
|
@ -80,7 +86,7 @@ func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery)
|
|||
|
||||
queryResult := make([]*auth.ExternalSession, 0)
|
||||
err := s.sqlStore.WithDbSession(ctx, func(sess *db.Session) error {
|
||||
return sess.Find(&queryResult, externalSession)
|
||||
return sess.Desc("id").Find(&queryResult, externalSession)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -51,6 +51,7 @@ type UpdateExternalSessionCommand struct {
|
|||
|
||||
type ListExternalSessionQuery struct {
|
||||
ID int64
|
||||
UserID int64
|
||||
NameID string
|
||||
SessionID string
|
||||
}
|
||||
|
|
|
@ -93,7 +93,11 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, id *authn.Ident
|
|||
updateCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
token, refreshErr := s.service.TryTokenRefresh(updateCtx, id, id.SessionToken)
|
||||
token, refreshErr := s.service.TryTokenRefresh(updateCtx, id, &oauthtoken.TokenRefreshMetadata{
|
||||
ExternalSessionID: id.SessionToken.ExternalSessionId,
|
||||
AuthModule: id.GetAuthenticatedBy(),
|
||||
AuthID: id.GetAuthID(),
|
||||
})
|
||||
if refreshErr != nil {
|
||||
if errors.Is(refreshErr, context.Canceled) {
|
||||
return nil, nil
|
||||
|
@ -107,7 +111,7 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, id *authn.Ident
|
|||
ctxLogger.Error("Failed to refresh OAuth access token", "id", id.ID, "error", refreshErr)
|
||||
|
||||
// log the user out
|
||||
if err := s.sessionService.RevokeToken(ctx, id.SessionToken, false); err != nil {
|
||||
if err := s.sessionService.RevokeToken(ctx, id.SessionToken, false); err != nil && !errors.Is(err, auth.ErrUserTokenNotFound) {
|
||||
ctxLogger.Warn("Failed to revoke session token", "id", id.ID, "tokenId", id.SessionToken.Id, "error", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/login"
|
||||
"github.com/grafana/grafana/pkg/services/oauthtoken"
|
||||
"github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest"
|
||||
)
|
||||
|
||||
|
@ -77,6 +78,14 @@ func TestOAuthTokenSync_SyncOAuthTokenHook(t *testing.T) {
|
|||
expectRevokeTokenCalled: false,
|
||||
expectToken: &login.UserAuth{OAuthExpiry: time.Now().Add(10 * time.Minute)},
|
||||
},
|
||||
{
|
||||
desc: "should not invalidate session if token refresh fails with no refresh token",
|
||||
identity: &authn.Identity{ID: "1", Type: claims.TypeUser, SessionToken: &auth.UserToken{}, AuthenticatedBy: login.AzureADAuthModule},
|
||||
expectedTryRefreshErr: oauthtoken.ErrNoRefreshTokenFound,
|
||||
expectTryRefreshTokenCalled: true,
|
||||
expectRevokeTokenCalled: true,
|
||||
expectedErr: oauthtoken.ErrNoRefreshTokenFound,
|
||||
},
|
||||
|
||||
// TODO: address coverage of oauthtoken sync
|
||||
}
|
||||
|
@ -89,7 +98,7 @@ func TestOAuthTokenSync_SyncOAuthTokenHook(t *testing.T) {
|
|||
)
|
||||
|
||||
service := &oauthtokentest.MockOauthTokenService{
|
||||
TryTokenRefreshFunc: func(ctx context.Context, usr identity.Requester, _ *auth.UserToken) (*oauth2.Token, error) {
|
||||
TryTokenRefreshFunc: func(ctx context.Context, usr identity.Requester, _ *oauthtoken.TokenRefreshMetadata) (*oauth2.Token, error) {
|
||||
tryRefreshCalled = true
|
||||
return nil, tt.expectedTryRefreshErr
|
||||
},
|
||||
|
|
|
@ -297,7 +297,9 @@ func (c *OAuth) Logout(ctx context.Context, user identity.Requester, sessionToke
|
|||
|
||||
ctxLogger := c.log.FromContext(ctx).New("userID", userID)
|
||||
|
||||
if err := c.oauthService.InvalidateOAuthTokens(ctx, user, sessionToken); err != nil {
|
||||
if err := c.oauthService.InvalidateOAuthTokens(ctx, user, &oauthtoken.TokenRefreshMetadata{
|
||||
ExternalSessionID: sessionToken.ExternalSessionId,
|
||||
AuthModule: user.GetAuthenticatedBy()}); err != nil {
|
||||
ctxLogger.Error("Failed to invalidate tokens", "error", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -19,10 +19,12 @@ import (
|
|||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/login/social"
|
||||
"github.com/grafana/grafana/pkg/login/social/socialtest"
|
||||
"github.com/grafana/grafana/pkg/models/usertoken"
|
||||
"github.com/grafana/grafana/pkg/services/auth"
|
||||
"github.com/grafana/grafana/pkg/services/authn"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/login"
|
||||
"github.com/grafana/grafana/pkg/services/oauthtoken"
|
||||
"github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest"
|
||||
"github.com/grafana/grafana/pkg/services/org"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
|
@ -481,7 +483,7 @@ func TestOAuth_Logout(t *testing.T) {
|
|||
"id_token": "some.id.token",
|
||||
})
|
||||
},
|
||||
InvalidateOAuthTokensFunc: func(_ context.Context, _ identity.Requester, _ *auth.UserToken) error {
|
||||
InvalidateOAuthTokensFunc: func(_ context.Context, _ identity.Requester, _ *oauthtoken.TokenRefreshMetadata) error {
|
||||
invalidateTokenCalled = true
|
||||
return nil
|
||||
},
|
||||
|
@ -492,7 +494,7 @@ func TestOAuth_Logout(t *testing.T) {
|
|||
}
|
||||
c := ProvideOAuth(authn.ClientWithPrefix("azuread"), tt.cfg, mockService, fakeSocialSvc, &setting.OSSImpl{Cfg: tt.cfg}, featuremgmt.WithFeatures(), tracing.InitializeTracerForTest())
|
||||
|
||||
redirect, ok := c.Logout(context.Background(), &authn.Identity{ID: "1", Type: claims.TypeUser}, nil)
|
||||
redirect, ok := c.Logout(context.Background(), &authn.Identity{ID: "1", Type: claims.TypeUser}, &usertoken.UserToken{})
|
||||
|
||||
assert.Equal(t, tt.expectedOK, ok)
|
||||
if tt.expectedOK {
|
||||
|
|
|
@ -760,12 +760,10 @@ func (s *Service) listPermission(ctx context.Context, scopeMap map[string]bool,
|
|||
cacheHit := false
|
||||
if t.HasFolderSupport() {
|
||||
var err error
|
||||
ok = false
|
||||
if !req.Options.SkipCache {
|
||||
tree, ok = s.getCachedFolderTree(ctx, req.Namespace)
|
||||
cacheHit = true
|
||||
tree, cacheHit = s.getCachedFolderTree(ctx, req.Namespace)
|
||||
}
|
||||
if !ok {
|
||||
if !cacheHit {
|
||||
tree, err = s.buildFolderTree(ctx, req.Namespace)
|
||||
if err != nil {
|
||||
ctxLogger.Error("could not build folder and dashboard tree", "error", err)
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
//go:generate mockery --name AuthInfoService --structname MockAuthInfoService --outpkg authinfotest --filename auth_info_service_mock.go --output ./authinfotest/
|
||||
type AuthInfoService interface {
|
||||
GetAuthInfo(ctx context.Context, query *GetAuthInfoQuery) (*UserAuth, error)
|
||||
GetUserLabels(ctx context.Context, query GetUserLabelsQuery) (map[int64]string, error)
|
||||
|
|
|
@ -0,0 +1,765 @@
|
|||
// Code generated by mockery; DO NOT EDIT.
|
||||
// github.com/vektra/mockery
|
||||
// template: testify
|
||||
|
||||
package authinfotest
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/login"
|
||||
"github.com/grafana/grafana/pkg/services/user"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// NewMockAuthInfoService creates a new instance of MockAuthInfoService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockAuthInfoService(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockAuthInfoService {
|
||||
mock := &MockAuthInfoService{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
// MockAuthInfoService is an autogenerated mock type for the AuthInfoService type
|
||||
type MockAuthInfoService struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockAuthInfoService_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockAuthInfoService) EXPECT() *MockAuthInfoService_Expecter {
|
||||
return &MockAuthInfoService_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// DeleteUserAuthInfo provides a mock function for the type MockAuthInfoService
|
||||
func (_mock *MockAuthInfoService) DeleteUserAuthInfo(ctx context.Context, userID int64) error {
|
||||
ret := _mock.Called(ctx, userID)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DeleteUserAuthInfo")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = returnFunc(ctx, userID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockAuthInfoService_DeleteUserAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteUserAuthInfo'
|
||||
type MockAuthInfoService_DeleteUserAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// DeleteUserAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - userID int64
|
||||
func (_e *MockAuthInfoService_Expecter) DeleteUserAuthInfo(ctx interface{}, userID interface{}) *MockAuthInfoService_DeleteUserAuthInfo_Call {
|
||||
return &MockAuthInfoService_DeleteUserAuthInfo_Call{Call: _e.mock.On("DeleteUserAuthInfo", ctx, userID)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_DeleteUserAuthInfo_Call) Run(run func(ctx context.Context, userID int64)) *MockAuthInfoService_DeleteUserAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 int64
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(int64)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_DeleteUserAuthInfo_Call) Return(err error) *MockAuthInfoService_DeleteUserAuthInfo_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_DeleteUserAuthInfo_Call) RunAndReturn(run func(ctx context.Context, userID int64) error) *MockAuthInfoService_DeleteUserAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetAuthInfo provides a mock function for the type MockAuthInfoService
|
||||
func (_mock *MockAuthInfoService) GetAuthInfo(ctx context.Context, query *login.GetAuthInfoQuery) (*login.UserAuth, error) {
|
||||
ret := _mock.Called(ctx, query)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetAuthInfo")
|
||||
}
|
||||
|
||||
var r0 *login.UserAuth
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.GetAuthInfoQuery) (*login.UserAuth, error)); ok {
|
||||
return returnFunc(ctx, query)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.GetAuthInfoQuery) *login.UserAuth); ok {
|
||||
r0 = returnFunc(ctx, query)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*login.UserAuth)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context, *login.GetAuthInfoQuery) error); ok {
|
||||
r1 = returnFunc(ctx, query)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthInfoService_GetAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAuthInfo'
|
||||
type MockAuthInfoService_GetAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - query *login.GetAuthInfoQuery
|
||||
func (_e *MockAuthInfoService_Expecter) GetAuthInfo(ctx interface{}, query interface{}) *MockAuthInfoService_GetAuthInfo_Call {
|
||||
return &MockAuthInfoService_GetAuthInfo_Call{Call: _e.mock.On("GetAuthInfo", ctx, query)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_GetAuthInfo_Call) Run(run func(ctx context.Context, query *login.GetAuthInfoQuery)) *MockAuthInfoService_GetAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 *login.GetAuthInfoQuery
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*login.GetAuthInfoQuery)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_GetAuthInfo_Call) Return(userAuth *login.UserAuth, err error) *MockAuthInfoService_GetAuthInfo_Call {
|
||||
_c.Call.Return(userAuth, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_GetAuthInfo_Call) RunAndReturn(run func(ctx context.Context, query *login.GetAuthInfoQuery) (*login.UserAuth, error)) *MockAuthInfoService_GetAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetUserLabels provides a mock function for the type MockAuthInfoService
|
||||
func (_mock *MockAuthInfoService) GetUserLabels(ctx context.Context, query login.GetUserLabelsQuery) (map[int64]string, error) {
|
||||
ret := _mock.Called(ctx, query)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetUserLabels")
|
||||
}
|
||||
|
||||
var r0 map[int64]string
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, login.GetUserLabelsQuery) (map[int64]string, error)); ok {
|
||||
return returnFunc(ctx, query)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, login.GetUserLabelsQuery) map[int64]string); ok {
|
||||
r0 = returnFunc(ctx, query)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[int64]string)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context, login.GetUserLabelsQuery) error); ok {
|
||||
r1 = returnFunc(ctx, query)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthInfoService_GetUserLabels_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserLabels'
|
||||
type MockAuthInfoService_GetUserLabels_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetUserLabels is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - query login.GetUserLabelsQuery
|
||||
func (_e *MockAuthInfoService_Expecter) GetUserLabels(ctx interface{}, query interface{}) *MockAuthInfoService_GetUserLabels_Call {
|
||||
return &MockAuthInfoService_GetUserLabels_Call{Call: _e.mock.On("GetUserLabels", ctx, query)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_GetUserLabels_Call) Run(run func(ctx context.Context, query login.GetUserLabelsQuery)) *MockAuthInfoService_GetUserLabels_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 login.GetUserLabelsQuery
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(login.GetUserLabelsQuery)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_GetUserLabels_Call) Return(int64ToString map[int64]string, err error) *MockAuthInfoService_GetUserLabels_Call {
|
||||
_c.Call.Return(int64ToString, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_GetUserLabels_Call) RunAndReturn(run func(ctx context.Context, query login.GetUserLabelsQuery) (map[int64]string, error)) *MockAuthInfoService_GetUserLabels_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetAuthInfo provides a mock function for the type MockAuthInfoService
|
||||
func (_mock *MockAuthInfoService) SetAuthInfo(ctx context.Context, cmd *login.SetAuthInfoCommand) error {
|
||||
ret := _mock.Called(ctx, cmd)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SetAuthInfo")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.SetAuthInfoCommand) error); ok {
|
||||
r0 = returnFunc(ctx, cmd)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockAuthInfoService_SetAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetAuthInfo'
|
||||
type MockAuthInfoService_SetAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SetAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - cmd *login.SetAuthInfoCommand
|
||||
func (_e *MockAuthInfoService_Expecter) SetAuthInfo(ctx interface{}, cmd interface{}) *MockAuthInfoService_SetAuthInfo_Call {
|
||||
return &MockAuthInfoService_SetAuthInfo_Call{Call: _e.mock.On("SetAuthInfo", ctx, cmd)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_SetAuthInfo_Call) Run(run func(ctx context.Context, cmd *login.SetAuthInfoCommand)) *MockAuthInfoService_SetAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 *login.SetAuthInfoCommand
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*login.SetAuthInfoCommand)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_SetAuthInfo_Call) Return(err error) *MockAuthInfoService_SetAuthInfo_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_SetAuthInfo_Call) RunAndReturn(run func(ctx context.Context, cmd *login.SetAuthInfoCommand) error) *MockAuthInfoService_SetAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// UpdateAuthInfo provides a mock function for the type MockAuthInfoService
|
||||
func (_mock *MockAuthInfoService) UpdateAuthInfo(ctx context.Context, cmd *login.UpdateAuthInfoCommand) error {
|
||||
ret := _mock.Called(ctx, cmd)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for UpdateAuthInfo")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.UpdateAuthInfoCommand) error); ok {
|
||||
r0 = returnFunc(ctx, cmd)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockAuthInfoService_UpdateAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateAuthInfo'
|
||||
type MockAuthInfoService_UpdateAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// UpdateAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - cmd *login.UpdateAuthInfoCommand
|
||||
func (_e *MockAuthInfoService_Expecter) UpdateAuthInfo(ctx interface{}, cmd interface{}) *MockAuthInfoService_UpdateAuthInfo_Call {
|
||||
return &MockAuthInfoService_UpdateAuthInfo_Call{Call: _e.mock.On("UpdateAuthInfo", ctx, cmd)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_UpdateAuthInfo_Call) Run(run func(ctx context.Context, cmd *login.UpdateAuthInfoCommand)) *MockAuthInfoService_UpdateAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 *login.UpdateAuthInfoCommand
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*login.UpdateAuthInfoCommand)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_UpdateAuthInfo_Call) Return(err error) *MockAuthInfoService_UpdateAuthInfo_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthInfoService_UpdateAuthInfo_Call) RunAndReturn(run func(ctx context.Context, cmd *login.UpdateAuthInfoCommand) error) *MockAuthInfoService_UpdateAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockStore creates a new instance of MockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockStore(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockStore {
|
||||
mock := &MockStore{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
// MockStore is an autogenerated mock type for the Store type
|
||||
type MockStore struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockStore_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockStore) EXPECT() *MockStore_Expecter {
|
||||
return &MockStore_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// DeleteUserAuthInfo provides a mock function for the type MockStore
|
||||
func (_mock *MockStore) DeleteUserAuthInfo(ctx context.Context, userID int64) error {
|
||||
ret := _mock.Called(ctx, userID)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DeleteUserAuthInfo")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = returnFunc(ctx, userID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStore_DeleteUserAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteUserAuthInfo'
|
||||
type MockStore_DeleteUserAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// DeleteUserAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - userID int64
|
||||
func (_e *MockStore_Expecter) DeleteUserAuthInfo(ctx interface{}, userID interface{}) *MockStore_DeleteUserAuthInfo_Call {
|
||||
return &MockStore_DeleteUserAuthInfo_Call{Call: _e.mock.On("DeleteUserAuthInfo", ctx, userID)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_DeleteUserAuthInfo_Call) Run(run func(ctx context.Context, userID int64)) *MockStore_DeleteUserAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 int64
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(int64)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_DeleteUserAuthInfo_Call) Return(err error) *MockStore_DeleteUserAuthInfo_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_DeleteUserAuthInfo_Call) RunAndReturn(run func(ctx context.Context, userID int64) error) *MockStore_DeleteUserAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetAuthInfo provides a mock function for the type MockStore
|
||||
func (_mock *MockStore) GetAuthInfo(ctx context.Context, query *login.GetAuthInfoQuery) (*login.UserAuth, error) {
|
||||
ret := _mock.Called(ctx, query)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetAuthInfo")
|
||||
}
|
||||
|
||||
var r0 *login.UserAuth
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.GetAuthInfoQuery) (*login.UserAuth, error)); ok {
|
||||
return returnFunc(ctx, query)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.GetAuthInfoQuery) *login.UserAuth); ok {
|
||||
r0 = returnFunc(ctx, query)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*login.UserAuth)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context, *login.GetAuthInfoQuery) error); ok {
|
||||
r1 = returnFunc(ctx, query)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockStore_GetAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAuthInfo'
|
||||
type MockStore_GetAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - query *login.GetAuthInfoQuery
|
||||
func (_e *MockStore_Expecter) GetAuthInfo(ctx interface{}, query interface{}) *MockStore_GetAuthInfo_Call {
|
||||
return &MockStore_GetAuthInfo_Call{Call: _e.mock.On("GetAuthInfo", ctx, query)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_GetAuthInfo_Call) Run(run func(ctx context.Context, query *login.GetAuthInfoQuery)) *MockStore_GetAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 *login.GetAuthInfoQuery
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*login.GetAuthInfoQuery)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_GetAuthInfo_Call) Return(userAuth *login.UserAuth, err error) *MockStore_GetAuthInfo_Call {
|
||||
_c.Call.Return(userAuth, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_GetAuthInfo_Call) RunAndReturn(run func(ctx context.Context, query *login.GetAuthInfoQuery) (*login.UserAuth, error)) *MockStore_GetAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetUserLabels provides a mock function for the type MockStore
|
||||
func (_mock *MockStore) GetUserLabels(ctx context.Context, query login.GetUserLabelsQuery) (map[int64]string, error) {
|
||||
ret := _mock.Called(ctx, query)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetUserLabels")
|
||||
}
|
||||
|
||||
var r0 map[int64]string
|
||||
var r1 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, login.GetUserLabelsQuery) (map[int64]string, error)); ok {
|
||||
return returnFunc(ctx, query)
|
||||
}
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, login.GetUserLabelsQuery) map[int64]string); ok {
|
||||
r0 = returnFunc(ctx, query)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(map[int64]string)
|
||||
}
|
||||
}
|
||||
if returnFunc, ok := ret.Get(1).(func(context.Context, login.GetUserLabelsQuery) error); ok {
|
||||
r1 = returnFunc(ctx, query)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockStore_GetUserLabels_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserLabels'
|
||||
type MockStore_GetUserLabels_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetUserLabels is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - query login.GetUserLabelsQuery
|
||||
func (_e *MockStore_Expecter) GetUserLabels(ctx interface{}, query interface{}) *MockStore_GetUserLabels_Call {
|
||||
return &MockStore_GetUserLabels_Call{Call: _e.mock.On("GetUserLabels", ctx, query)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_GetUserLabels_Call) Run(run func(ctx context.Context, query login.GetUserLabelsQuery)) *MockStore_GetUserLabels_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 login.GetUserLabelsQuery
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(login.GetUserLabelsQuery)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_GetUserLabels_Call) Return(int64ToString map[int64]string, err error) *MockStore_GetUserLabels_Call {
|
||||
_c.Call.Return(int64ToString, err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_GetUserLabels_Call) RunAndReturn(run func(ctx context.Context, query login.GetUserLabelsQuery) (map[int64]string, error)) *MockStore_GetUserLabels_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetAuthInfo provides a mock function for the type MockStore
|
||||
func (_mock *MockStore) SetAuthInfo(ctx context.Context, cmd *login.SetAuthInfoCommand) error {
|
||||
ret := _mock.Called(ctx, cmd)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SetAuthInfo")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.SetAuthInfoCommand) error); ok {
|
||||
r0 = returnFunc(ctx, cmd)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStore_SetAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetAuthInfo'
|
||||
type MockStore_SetAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SetAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - cmd *login.SetAuthInfoCommand
|
||||
func (_e *MockStore_Expecter) SetAuthInfo(ctx interface{}, cmd interface{}) *MockStore_SetAuthInfo_Call {
|
||||
return &MockStore_SetAuthInfo_Call{Call: _e.mock.On("SetAuthInfo", ctx, cmd)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_SetAuthInfo_Call) Run(run func(ctx context.Context, cmd *login.SetAuthInfoCommand)) *MockStore_SetAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 *login.SetAuthInfoCommand
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*login.SetAuthInfoCommand)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_SetAuthInfo_Call) Return(err error) *MockStore_SetAuthInfo_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_SetAuthInfo_Call) RunAndReturn(run func(ctx context.Context, cmd *login.SetAuthInfoCommand) error) *MockStore_SetAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// UpdateAuthInfo provides a mock function for the type MockStore
|
||||
func (_mock *MockStore) UpdateAuthInfo(ctx context.Context, cmd *login.UpdateAuthInfoCommand) error {
|
||||
ret := _mock.Called(ctx, cmd)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for UpdateAuthInfo")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(context.Context, *login.UpdateAuthInfoCommand) error); ok {
|
||||
r0 = returnFunc(ctx, cmd)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockStore_UpdateAuthInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateAuthInfo'
|
||||
type MockStore_UpdateAuthInfo_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// UpdateAuthInfo is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - cmd *login.UpdateAuthInfoCommand
|
||||
func (_e *MockStore_Expecter) UpdateAuthInfo(ctx interface{}, cmd interface{}) *MockStore_UpdateAuthInfo_Call {
|
||||
return &MockStore_UpdateAuthInfo_Call{Call: _e.mock.On("UpdateAuthInfo", ctx, cmd)}
|
||||
}
|
||||
|
||||
func (_c *MockStore_UpdateAuthInfo_Call) Run(run func(ctx context.Context, cmd *login.UpdateAuthInfoCommand)) *MockStore_UpdateAuthInfo_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 context.Context
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(context.Context)
|
||||
}
|
||||
var arg1 *login.UpdateAuthInfoCommand
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(*login.UpdateAuthInfoCommand)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_UpdateAuthInfo_Call) Return(err error) *MockStore_UpdateAuthInfo_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockStore_UpdateAuthInfo_Call) RunAndReturn(run func(ctx context.Context, cmd *login.UpdateAuthInfoCommand) error) *MockStore_UpdateAuthInfo_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockUserProtectionService creates a new instance of MockUserProtectionService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockUserProtectionService(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockUserProtectionService {
|
||||
mock := &MockUserProtectionService{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
// MockUserProtectionService is an autogenerated mock type for the UserProtectionService type
|
||||
type MockUserProtectionService struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockUserProtectionService_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockUserProtectionService) EXPECT() *MockUserProtectionService_Expecter {
|
||||
return &MockUserProtectionService_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// AllowUserMapping provides a mock function for the type MockUserProtectionService
|
||||
func (_mock *MockUserProtectionService) AllowUserMapping(user1 *user.User, authModule string) error {
|
||||
ret := _mock.Called(user1, authModule)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AllowUserMapping")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if returnFunc, ok := ret.Get(0).(func(*user.User, string) error); ok {
|
||||
r0 = returnFunc(user1, authModule)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockUserProtectionService_AllowUserMapping_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllowUserMapping'
|
||||
type MockUserProtectionService_AllowUserMapping_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AllowUserMapping is a helper method to define mock.On call
|
||||
// - user1 *user.User
|
||||
// - authModule string
|
||||
func (_e *MockUserProtectionService_Expecter) AllowUserMapping(user1 interface{}, authModule interface{}) *MockUserProtectionService_AllowUserMapping_Call {
|
||||
return &MockUserProtectionService_AllowUserMapping_Call{Call: _e.mock.On("AllowUserMapping", user1, authModule)}
|
||||
}
|
||||
|
||||
func (_c *MockUserProtectionService_AllowUserMapping_Call) Run(run func(user1 *user.User, authModule string)) *MockUserProtectionService_AllowUserMapping_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
var arg0 *user.User
|
||||
if args[0] != nil {
|
||||
arg0 = args[0].(*user.User)
|
||||
}
|
||||
var arg1 string
|
||||
if args[1] != nil {
|
||||
arg1 = args[1].(string)
|
||||
}
|
||||
run(
|
||||
arg0,
|
||||
arg1,
|
||||
)
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUserProtectionService_AllowUserMapping_Call) Return(err error) *MockUserProtectionService_AllowUserMapping_Call {
|
||||
_c.Call.Return(err)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockUserProtectionService_AllowUserMapping_Call) RunAndReturn(run func(user1 *user.User, authModule string) error) *MockUserProtectionService_AllowUserMapping_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
|
@ -11,6 +11,7 @@ import (
|
|||
"github.com/go-jose/go-jose/v4/jwt"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"golang.org/x/oauth2"
|
||||
|
||||
|
@ -57,8 +58,14 @@ var _ OAuthTokenService = (*Service)(nil)
|
|||
type OAuthTokenService interface {
|
||||
GetCurrentOAuthToken(context.Context, identity.Requester, *auth.UserToken) *oauth2.Token
|
||||
IsOAuthPassThruEnabled(*datasources.DataSource) bool
|
||||
TryTokenRefresh(context.Context, identity.Requester, *auth.UserToken) (*oauth2.Token, error)
|
||||
InvalidateOAuthTokens(context.Context, identity.Requester, *auth.UserToken) error
|
||||
TryTokenRefresh(context.Context, identity.Requester, *TokenRefreshMetadata) (*oauth2.Token, error)
|
||||
InvalidateOAuthTokens(context.Context, identity.Requester, *TokenRefreshMetadata) error
|
||||
}
|
||||
|
||||
type TokenRefreshMetadata struct {
|
||||
ExternalSessionID int64
|
||||
AuthModule string
|
||||
AuthID string
|
||||
}
|
||||
|
||||
func ProvideService(socialService social.Service, authInfoService login.AuthInfoService, cfg *setting.Cfg, registerer prometheus.Registerer,
|
||||
|
@ -102,51 +109,71 @@ func (o *Service) GetCurrentOAuthToken(ctx context.Context, usr identity.Request
|
|||
|
||||
ctxLogger = ctxLogger.New("userID", userID)
|
||||
|
||||
if !strings.HasPrefix(usr.GetAuthenticatedBy(), "oauth_") {
|
||||
ctxLogger.Warn("The specified user's auth provider is not oauth",
|
||||
"authmodule", usr.GetAuthenticatedBy())
|
||||
tokenRefreshMetadata := &TokenRefreshMetadata{
|
||||
ExternalSessionID: 0,
|
||||
}
|
||||
var persistedToken *oauth2.Token
|
||||
// Find the external session associated with the user and session token
|
||||
// regardless of the improvedExternalSessionHandling feature toggle,
|
||||
// because Grafana writes and updates both tables to make the switch
|
||||
// to the new session handling smoother.
|
||||
externalSession, err := o.getExternalSession(ctx, usr, userID, sessionToken)
|
||||
if err != nil && !errors.Is(err, auth.ErrExternalSessionNotFound) {
|
||||
ctxLogger.Error("Failed to get external session", "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the feature toggle is enabled, an external session is required.
|
||||
if o.features.IsEnabledGlobally(featuremgmt.FlagImprovedExternalSessionHandling) && (externalSession == nil || errors.Is(err, auth.ErrExternalSessionNotFound)) {
|
||||
ctxLogger.Error("No external session found for user", "userID", userID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// externalSession can be nil if Grafana was updated from a version where the
|
||||
// external session table was not used yet (did not exist) and the user has not logged in since
|
||||
// the version update (therefore no external session was created for the user yet).
|
||||
if externalSession != nil {
|
||||
tokenRefreshMetadata.ExternalSessionID = externalSession.ID
|
||||
}
|
||||
|
||||
authInfo, err := o.AuthInfoService.GetAuthInfo(ctx, &login.GetAuthInfoQuery{
|
||||
UserId: userID,
|
||||
})
|
||||
if err != nil {
|
||||
if errors.Is(err, user.ErrUserNotFound) {
|
||||
ctxLogger.Warn("No AuthInfo found for user", "userID", userID)
|
||||
return nil
|
||||
}
|
||||
|
||||
ctxLogger.Error("Failed to fetch AuthInfo for user", "userID", userID, "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
tokenRefreshMetadata.AuthID = authInfo.AuthId
|
||||
tokenRefreshMetadata.AuthModule = authInfo.AuthModule
|
||||
|
||||
if !strings.HasPrefix(tokenRefreshMetadata.AuthModule, "oauth_") {
|
||||
ctxLogger.Warn("The specified user's auth provider is not oauth",
|
||||
"authmodule", tokenRefreshMetadata.AuthModule)
|
||||
return nil
|
||||
}
|
||||
|
||||
var persistedToken *oauth2.Token
|
||||
if o.features.IsEnabledGlobally(featuremgmt.FlagImprovedExternalSessionHandling) {
|
||||
externalSession, err := o.sessionService.GetExternalSession(ctx, sessionToken.ExternalSessionId)
|
||||
if err != nil {
|
||||
if errors.Is(err, auth.ErrExternalSessionNotFound) {
|
||||
return nil
|
||||
}
|
||||
ctxLogger.Error("Failed to fetch external session", "error", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
persistedToken = buildOAuthTokenFromExternalSession(externalSession)
|
||||
|
||||
if persistedToken.RefreshToken == "" {
|
||||
return persistedToken
|
||||
}
|
||||
} else {
|
||||
authInfo, ok, _ := o.hasOAuthEntry(ctx, usr)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := checkOAuthRefreshToken(authInfo); err != nil {
|
||||
if errors.Is(err, ErrNoRefreshTokenFound) {
|
||||
return buildOAuthTokenFromAuthInfo(authInfo)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
persistedToken = buildOAuthTokenFromAuthInfo(authInfo)
|
||||
}
|
||||
|
||||
if persistedToken.RefreshToken == "" {
|
||||
return persistedToken
|
||||
}
|
||||
|
||||
refreshNeeded := needTokenRefresh(ctx, persistedToken)
|
||||
if !refreshNeeded {
|
||||
return persistedToken
|
||||
}
|
||||
|
||||
token, err := o.TryTokenRefresh(ctx, usr, sessionToken)
|
||||
token, err := o.TryTokenRefresh(ctx, usr, tokenRefreshMetadata)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNoRefreshTokenFound) {
|
||||
return persistedToken
|
||||
|
@ -214,7 +241,7 @@ func (o *Service) hasOAuthEntry(ctx context.Context, usr identity.Requester) (*l
|
|||
|
||||
// TryTokenRefresh returns an error in case the OAuth token refresh was unsuccessful
|
||||
// It uses a server lock to prevent getting the Refresh Token multiple times for a given User
|
||||
func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) (*oauth2.Token, error) {
|
||||
func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, tokenRefreshMetadata *TokenRefreshMetadata) (*oauth2.Token, error) {
|
||||
ctx, span := o.tracer.Start(ctx, "oauthtoken.TryTokenRefresh")
|
||||
defer span.End()
|
||||
|
||||
|
@ -239,14 +266,13 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, s
|
|||
|
||||
ctxLogger = ctxLogger.New("userID", userID)
|
||||
|
||||
// get the token's auth provider (f.e. azuread)
|
||||
currAuthenticator := usr.GetAuthenticatedBy()
|
||||
if !strings.HasPrefix(currAuthenticator, "oauth") {
|
||||
ctxLogger.Warn("The specified user's auth provider is not OAuth", "authmodule", currAuthenticator)
|
||||
if !strings.HasPrefix(tokenRefreshMetadata.AuthModule, "oauth_") {
|
||||
ctxLogger.Warn("The specified user's auth provider is not oauth",
|
||||
"authmodule", tokenRefreshMetadata.AuthModule)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
provider := strings.TrimPrefix(currAuthenticator, "oauth_")
|
||||
provider := strings.TrimPrefix(tokenRefreshMetadata.AuthModule, "oauth_")
|
||||
currentOAuthInfo := o.SocialService.GetOAuthInfoProvider(provider)
|
||||
if currentOAuthInfo == nil {
|
||||
ctxLogger.Warn("OAuth provider not found", "provider", provider)
|
||||
|
@ -261,7 +287,7 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, s
|
|||
|
||||
lockKey := fmt.Sprintf("oauth-refresh-token-%d", userID)
|
||||
if o.features.IsEnabledGlobally(featuremgmt.FlagImprovedExternalSessionHandling) {
|
||||
lockKey = fmt.Sprintf("oauth-refresh-token-%d-%d", userID, sessionToken.ExternalSessionId)
|
||||
lockKey = fmt.Sprintf("oauth-refresh-token-%d-%d", userID, tokenRefreshMetadata.ExternalSessionID)
|
||||
}
|
||||
|
||||
lockTimeConfig := serverlock.LockTimeConfig{
|
||||
|
@ -290,7 +316,7 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, s
|
|||
var persistedToken *oauth2.Token
|
||||
var externalSession *auth.ExternalSession
|
||||
if o.features.IsEnabledGlobally(featuremgmt.FlagImprovedExternalSessionHandling) {
|
||||
externalSession, err = o.sessionService.GetExternalSession(ctx, sessionToken.ExternalSessionId)
|
||||
externalSession, err = o.sessionService.GetExternalSession(ctx, tokenRefreshMetadata.ExternalSessionID)
|
||||
if err != nil {
|
||||
if errors.Is(err, auth.ErrExternalSessionNotFound) {
|
||||
ctxLogger.Error("External session was not found for user", "error", err)
|
||||
|
@ -321,7 +347,7 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, s
|
|||
return
|
||||
}
|
||||
|
||||
newToken, cmdErr = o.tryGetOrRefreshOAuthToken(ctx, persistedToken, usr, sessionToken)
|
||||
newToken, cmdErr = o.tryGetOrRefreshOAuthToken(ctx, persistedToken, usr, tokenRefreshMetadata)
|
||||
}, retryOpt)
|
||||
if lockErr != nil {
|
||||
ctxLogger.Error("Failed to obtain token refresh lock", "error", lockErr)
|
||||
|
@ -330,14 +356,14 @@ func (o *Service) TryTokenRefresh(ctx context.Context, usr identity.Requester, s
|
|||
|
||||
// Silence ErrNoRefreshTokenFound
|
||||
if errors.Is(cmdErr, ErrNoRefreshTokenFound) {
|
||||
return nil, nil
|
||||
return nil, ErrNoRefreshTokenFound
|
||||
}
|
||||
|
||||
return newToken, cmdErr
|
||||
}
|
||||
|
||||
// InvalidateOAuthTokens invalidates the OAuth tokens (access_token, refresh_token) and sets the Expiry to default/zero
|
||||
func (o *Service) InvalidateOAuthTokens(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) error {
|
||||
func (o *Service) InvalidateOAuthTokens(ctx context.Context, usr identity.Requester, tokenRefreshMetadata *TokenRefreshMetadata) error {
|
||||
userID, err := usr.GetInternalID()
|
||||
if err != nil {
|
||||
logger.Error("Failed to convert user id to int", "id", usr.GetID(), "error", err)
|
||||
|
@ -347,7 +373,7 @@ func (o *Service) InvalidateOAuthTokens(ctx context.Context, usr identity.Reques
|
|||
ctxLogger := logger.FromContext(ctx).New("userID", userID)
|
||||
|
||||
if o.features.IsEnabledGlobally(featuremgmt.FlagImprovedExternalSessionHandling) {
|
||||
err := o.sessionService.UpdateExternalSession(ctx, sessionToken.ExternalSessionId, &auth.UpdateExternalSessionCommand{
|
||||
err := o.sessionService.UpdateExternalSession(ctx, tokenRefreshMetadata.ExternalSessionID, &auth.UpdateExternalSessionCommand{
|
||||
Token: &oauth2.Token{},
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -358,8 +384,8 @@ func (o *Service) InvalidateOAuthTokens(ctx context.Context, usr identity.Reques
|
|||
|
||||
return o.AuthInfoService.UpdateAuthInfo(ctx, &login.UpdateAuthInfoCommand{
|
||||
UserId: userID,
|
||||
AuthModule: usr.GetAuthenticatedBy(),
|
||||
AuthId: usr.GetAuthID(),
|
||||
AuthModule: tokenRefreshMetadata.AuthModule,
|
||||
AuthId: tokenRefreshMetadata.AuthID,
|
||||
OAuthToken: &oauth2.Token{
|
||||
AccessToken: "",
|
||||
RefreshToken: "",
|
||||
|
@ -368,13 +394,14 @@ func (o *Service) InvalidateOAuthTokens(ctx context.Context, usr identity.Reques
|
|||
})
|
||||
}
|
||||
|
||||
func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, persistedToken *oauth2.Token, usr identity.Requester, sessionToken *auth.UserToken) (*oauth2.Token, error) {
|
||||
func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, persistedToken *oauth2.Token, usr identity.Requester, tokenRefreshMetadata *TokenRefreshMetadata) (*oauth2.Token, error) {
|
||||
ctx, span := o.tracer.Start(ctx, "oauthtoken.tryGetOrRefreshOAuthToken")
|
||||
defer span.End()
|
||||
|
||||
userID, err := usr.GetInternalID()
|
||||
if err != nil {
|
||||
logger.Error("Failed to convert user id to int", "id", usr.GetID(), "error", err)
|
||||
span.SetStatus(codes.Error, "Failed to convert user id to int")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -382,8 +409,11 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, persistedToken
|
|||
|
||||
ctxLogger := logger.FromContext(ctx).New("userID", userID)
|
||||
|
||||
// tryGetOrRefreshOAuthToken assumes that the AuthModule has RefreshToken enabled
|
||||
// which is checked by the caller (TryTokenRefresh)
|
||||
if persistedToken.RefreshToken == "" {
|
||||
ctxLogger.Warn("No refresh token available", "authmodule", usr.GetAuthenticatedBy())
|
||||
ctxLogger.Error("No refresh token available", "authmodule", tokenRefreshMetadata.AuthModule)
|
||||
span.SetStatus(codes.Error, ErrNoRefreshTokenFound.Error())
|
||||
return nil, ErrNoRefreshTokenFound
|
||||
}
|
||||
|
||||
|
@ -392,50 +422,44 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, persistedToken
|
|||
return persistedToken, nil
|
||||
}
|
||||
|
||||
authProvider := usr.GetAuthenticatedBy()
|
||||
connect, err := o.SocialService.GetConnector(authProvider)
|
||||
connect, err := o.SocialService.GetConnector(tokenRefreshMetadata.AuthModule)
|
||||
if err != nil {
|
||||
ctxLogger.Error("Failed to get oauth connector", "provider", authProvider, "error", err)
|
||||
ctxLogger.Error("Failed to get oauth connector", "provider", tokenRefreshMetadata.AuthModule, "error", err)
|
||||
span.SetStatus(codes.Error, "Failed to get oauth connector: "+err.Error())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client, err := o.SocialService.GetOAuthHttpClient(authProvider)
|
||||
client, err := o.SocialService.GetOAuthHttpClient(tokenRefreshMetadata.AuthModule)
|
||||
if err != nil {
|
||||
ctxLogger.Error("Failed to get oauth http client", "provider", authProvider, "error", err)
|
||||
ctxLogger.Error("Failed to get oauth http client", "provider", tokenRefreshMetadata.AuthModule, "error", err)
|
||||
span.SetStatus(codes.Error, "Failed to get oauth http client")
|
||||
return nil, err
|
||||
}
|
||||
ctx = context.WithValue(ctx, oauth2.HTTPClient, client)
|
||||
|
||||
start := time.Now()
|
||||
// TokenSource handles refreshing the token if it has expired
|
||||
token, err := connect.TokenSource(ctx, persistedToken).Token()
|
||||
token, refreshErr := connect.TokenSource(ctx, persistedToken).Token()
|
||||
duration := time.Since(start)
|
||||
o.tokenRefreshDuration.WithLabelValues(authProvider, fmt.Sprintf("%t", err == nil)).Observe(duration.Seconds())
|
||||
o.tokenRefreshDuration.WithLabelValues(tokenRefreshMetadata.AuthModule, fmt.Sprintf("%t", err == nil)).Observe(duration.Seconds())
|
||||
|
||||
if err != nil {
|
||||
if refreshErr != nil {
|
||||
span.SetAttributes(attribute.Bool("token_refreshed", false))
|
||||
ctxLogger.Error("Failed to retrieve oauth access token",
|
||||
"provider", usr.GetAuthenticatedBy(), "error", err)
|
||||
"provider", tokenRefreshMetadata.AuthModule, "error", refreshErr)
|
||||
|
||||
// token refresh failed, invalidate the old token
|
||||
if err := o.InvalidateOAuthTokens(ctx, usr, sessionToken); err != nil {
|
||||
ctxLogger.Warn("Failed to invalidate OAuth tokens", "authID", usr.GetAuthID(), "error", err)
|
||||
if err := o.InvalidateOAuthTokens(ctx, usr, tokenRefreshMetadata); err != nil {
|
||||
ctxLogger.Warn("Failed to invalidate OAuth tokens", "authID", tokenRefreshMetadata.AuthID, "error", err)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
return nil, refreshErr
|
||||
}
|
||||
|
||||
span.SetAttributes(attribute.Bool("token_refreshed", true))
|
||||
|
||||
// If the tokens are not the same, update the entry in the DB
|
||||
if !tokensEq(persistedToken, token) {
|
||||
updateAuthCommand := &login.UpdateAuthInfoCommand{
|
||||
UserId: userID,
|
||||
AuthModule: usr.GetAuthenticatedBy(),
|
||||
AuthId: usr.GetAuthID(),
|
||||
OAuthToken: token,
|
||||
}
|
||||
|
||||
if o.Cfg.Env == setting.Dev {
|
||||
ctxLogger.Debug("Oauth got token",
|
||||
"auth_module", usr.GetAuthenticatedBy(),
|
||||
|
@ -446,17 +470,32 @@ func (o *Service) tryGetOrRefreshOAuthToken(ctx context.Context, persistedToken
|
|||
}
|
||||
|
||||
if !o.features.IsEnabledGlobally(featuremgmt.FlagImprovedExternalSessionHandling) {
|
||||
updateAuthCommand := &login.UpdateAuthInfoCommand{
|
||||
UserId: userID,
|
||||
AuthModule: tokenRefreshMetadata.AuthModule,
|
||||
AuthId: tokenRefreshMetadata.AuthID,
|
||||
OAuthToken: token,
|
||||
}
|
||||
if err := o.AuthInfoService.UpdateAuthInfo(ctx, updateAuthCommand); err != nil {
|
||||
ctxLogger.Error("Failed to update auth info during token refresh", "authID", usr.GetAuthID(), "error", err)
|
||||
ctxLogger.Error("Failed to update auth info during token refresh", "authID", tokenRefreshMetadata.AuthID, "error", err)
|
||||
span.SetStatus(codes.Error, "Failed to update auth info during token refresh")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := o.sessionService.UpdateExternalSession(ctx, sessionToken.ExternalSessionId, &auth.UpdateExternalSessionCommand{
|
||||
Token: token,
|
||||
}); err != nil {
|
||||
ctxLogger.Error("Failed to update external session during token refresh", "error", err)
|
||||
return nil, err
|
||||
// Update the external session with the new token if we the user has an external session,
|
||||
// regardless of the feature flag state to keep the `user_external_session` table in sync.
|
||||
// ExternalSessionID should always be set except for some edge cases:
|
||||
// - when Grafana was updated to a version where the `improvedExternalSessionHandling` feature flag
|
||||
// was enabled after the user logged in
|
||||
if tokenRefreshMetadata.ExternalSessionID != 0 {
|
||||
if err := o.sessionService.UpdateExternalSession(ctx, tokenRefreshMetadata.ExternalSessionID, &auth.UpdateExternalSessionCommand{
|
||||
Token: token,
|
||||
}); err != nil {
|
||||
ctxLogger.Error("Failed to update external session during token refresh", "error", err)
|
||||
span.SetStatus(codes.Error, "Failed to update external session during token refresh")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
ctxLogger.Debug("Updated oauth info for user")
|
||||
|
@ -502,6 +541,11 @@ func needTokenRefresh(ctx context.Context, persistedToken *oauth2.Token) bool {
|
|||
|
||||
ctxLogger := logger.FromContext(ctx)
|
||||
|
||||
if persistedToken.AccessToken == "" {
|
||||
ctxLogger.Debug("Access token has been cleared, need to refresh")
|
||||
return true
|
||||
}
|
||||
|
||||
idTokenExp, err := GetIDTokenExpiry(persistedToken)
|
||||
if err != nil {
|
||||
ctxLogger.Warn("Could not get ID Token expiry", "error", err)
|
||||
|
@ -552,22 +596,6 @@ func buildOAuthTokenFromExternalSession(externalSession *auth.ExternalSession) *
|
|||
return token
|
||||
}
|
||||
|
||||
func checkOAuthRefreshToken(authInfo *login.UserAuth) error {
|
||||
if !strings.Contains(authInfo.AuthModule, "oauth") {
|
||||
logger.Warn("The specified user's auth provider is not oauth",
|
||||
"authmodule", authInfo.AuthModule, "userid", authInfo.UserId)
|
||||
return ErrNotAnOAuthProvider
|
||||
}
|
||||
|
||||
if authInfo.OAuthRefreshToken == "" {
|
||||
logger.Warn("No refresh token available",
|
||||
"authmodule", authInfo.AuthModule, "userid", authInfo.UserId)
|
||||
return ErrNoRefreshTokenFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIDTokenExpiry extracts the expiry time from the ID token
|
||||
func GetIDTokenExpiry(token *oauth2.Token) (time.Time, error) {
|
||||
idToken, ok := token.Extra("id_token").(string)
|
||||
|
@ -601,3 +629,28 @@ func getExpiryWithSkew(expiry time.Time) (adjustedExpiry time.Time, hasTokenExpi
|
|||
hasTokenExpired = adjustedExpiry.Before(time.Now())
|
||||
return
|
||||
}
|
||||
|
||||
// getExternalSession fetches the external session based on the user and session token.
|
||||
// When using the render module, it fetches the most recent external session for the user
|
||||
// since the session token ID is not available.
|
||||
// For regular users, it uses the session token ID to fetch the external session.
|
||||
func (o *Service) getExternalSession(ctx context.Context, usr identity.Requester, userID int64, sessionToken *auth.UserToken) (*auth.ExternalSession, error) {
|
||||
if usr.GetAuthenticatedBy() == login.RenderModule {
|
||||
// When using render module, we don't have the session token ID, so we need to fetch the most recent session
|
||||
// entry for the user (as it is done with the old flow).
|
||||
// In the future, we might want to consider passing the session token ID to the render module to make this more robust.
|
||||
externalSessions, err := o.sessionService.FindExternalSessions(ctx, &auth.ListExternalSessionQuery{UserID: userID})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(externalSessions) == 0 || externalSessions[0] == nil {
|
||||
return nil, auth.ErrExternalSessionNotFound
|
||||
}
|
||||
|
||||
return externalSessions[0], nil
|
||||
}
|
||||
|
||||
// For regular users, we use the session token ID to fetch the external session
|
||||
return o.sessionService.GetExternalSession(ctx, sessionToken.ExternalSessionId)
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -8,13 +8,14 @@ import (
|
|||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/services/auth"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/oauthtoken"
|
||||
)
|
||||
|
||||
type MockOauthTokenService struct {
|
||||
GetCurrentOauthTokenFunc func(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) *oauth2.Token
|
||||
IsOAuthPassThruEnabledFunc func(ds *datasources.DataSource) bool
|
||||
InvalidateOAuthTokensFunc func(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) error
|
||||
TryTokenRefreshFunc func(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) (*oauth2.Token, error)
|
||||
InvalidateOAuthTokensFunc func(ctx context.Context, usr identity.Requester, metadata *oauthtoken.TokenRefreshMetadata) error
|
||||
TryTokenRefreshFunc func(ctx context.Context, usr identity.Requester, metadata *oauthtoken.TokenRefreshMetadata) (*oauth2.Token, error)
|
||||
}
|
||||
|
||||
func (m *MockOauthTokenService) GetCurrentOAuthToken(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) *oauth2.Token {
|
||||
|
@ -31,16 +32,16 @@ func (m *MockOauthTokenService) IsOAuthPassThruEnabled(ds *datasources.DataSourc
|
|||
return false
|
||||
}
|
||||
|
||||
func (m *MockOauthTokenService) InvalidateOAuthTokens(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) error {
|
||||
func (m *MockOauthTokenService) InvalidateOAuthTokens(ctx context.Context, usr identity.Requester, metadata *oauthtoken.TokenRefreshMetadata) error {
|
||||
if m.InvalidateOAuthTokensFunc != nil {
|
||||
return m.InvalidateOAuthTokensFunc(ctx, usr, sessionToken)
|
||||
return m.InvalidateOAuthTokensFunc(ctx, usr, metadata)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockOauthTokenService) TryTokenRefresh(ctx context.Context, usr identity.Requester, sessionToken *auth.UserToken) (*oauth2.Token, error) {
|
||||
func (m *MockOauthTokenService) TryTokenRefresh(ctx context.Context, usr identity.Requester, metadata *oauthtoken.TokenRefreshMetadata) (*oauth2.Token, error) {
|
||||
if m.TryTokenRefreshFunc != nil {
|
||||
return m.TryTokenRefreshFunc(ctx, usr, sessionToken)
|
||||
return m.TryTokenRefreshFunc(ctx, usr, metadata)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
|
|
@ -29,10 +29,10 @@ func (s *Service) IsOAuthPassThruEnabled(ds *datasources.DataSource) bool {
|
|||
return oauthtoken.IsOAuthPassThruEnabled(ds)
|
||||
}
|
||||
|
||||
func (s *Service) TryTokenRefresh(context.Context, identity.Requester, *auth.UserToken) (*oauth2.Token, error) {
|
||||
func (s *Service) TryTokenRefresh(context.Context, identity.Requester, *oauthtoken.TokenRefreshMetadata) (*oauth2.Token, error) {
|
||||
return s.Token, nil
|
||||
}
|
||||
|
||||
func (s *Service) InvalidateOAuthTokens(context.Context, identity.Requester, *auth.UserToken) error {
|
||||
func (s *Service) InvalidateOAuthTokens(context.Context, identity.Requester, *oauthtoken.TokenRefreshMetadata) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -168,6 +168,16 @@ func (s *QueryHistoryService) starHandler(c *contextmodel.ReqContext) response.R
|
|||
if len(queryUID) > 0 && !util.IsValidShortUID(queryUID) {
|
||||
return response.Error(http.StatusNotFound, "Query in query history not found", nil)
|
||||
}
|
||||
if s.k8sClients != nil {
|
||||
if err := s.k8sClients.AddStar(c, queryUID); err != nil {
|
||||
return response.Error(http.StatusInternalServerError, "Failed to star query in query history", err)
|
||||
}
|
||||
return response.JSON(http.StatusOK, QueryHistoryResponse{
|
||||
Result: QueryHistoryDTO{
|
||||
UID: queryUID,
|
||||
Starred: true,
|
||||
}})
|
||||
}
|
||||
|
||||
query, err := s.StarQueryInQueryHistory(c.Req.Context(), c.SignedInUser, queryUID)
|
||||
if err != nil {
|
||||
|
@ -192,6 +202,16 @@ func (s *QueryHistoryService) unstarHandler(c *contextmodel.ReqContext) response
|
|||
if len(queryUID) > 0 && !util.IsValidShortUID(queryUID) {
|
||||
return response.Error(http.StatusNotFound, "Query in query history not found", nil)
|
||||
}
|
||||
if s.k8sClients != nil {
|
||||
if err := s.k8sClients.RemoveStar(c, queryUID); err != nil {
|
||||
return response.Error(http.StatusInternalServerError, "Failed to star query in query history", err)
|
||||
}
|
||||
return response.JSON(http.StatusOK, QueryHistoryResponse{
|
||||
Result: QueryHistoryDTO{
|
||||
UID: queryUID,
|
||||
Starred: true,
|
||||
}})
|
||||
}
|
||||
|
||||
query, err := s.UnstarQueryInQueryHistory(c.Req.Context(), c.SignedInUser, queryUID)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,103 @@
|
|||
package queryhistory
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
preferencesV1 "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver"
|
||||
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
|
||||
)
|
||||
|
||||
type k8sClients struct {
|
||||
namespacer authlib.NamespaceFormatter
|
||||
configProvider apiserver.DirectRestConfigProvider
|
||||
}
|
||||
|
||||
// GetStars implements K8sClients.
|
||||
func (k *k8sClients) GetStars(c *contextmodel.ReqContext) ([]string, error) {
|
||||
dyn, err := dynamic.NewForConfig(k.configProvider.GetDirectRestConfig(c))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := dyn.Resource(preferencesV1.StarsResourceInfo.GroupVersionResource()).Namespace(k.namespacer(c.OrgID))
|
||||
|
||||
ctx := c.Req.Context()
|
||||
user, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj, _ := client.Get(ctx, "user-"+user.GetIdentifier(), v1.GetOptions{})
|
||||
if obj != nil {
|
||||
resources, ok, _ := unstructured.NestedSlice(obj.Object, "spec", "resource")
|
||||
if ok && resources != nil {
|
||||
for _, r := range resources {
|
||||
tmp, ok := r.(map[string]any)
|
||||
if ok {
|
||||
g, _, _ := unstructured.NestedString(tmp, "group")
|
||||
k, _, _ := unstructured.NestedString(tmp, "kind")
|
||||
if k == "Query" && g == "history.grafana.app" {
|
||||
names, _, _ := unstructured.NestedStringSlice(tmp, "names")
|
||||
return names, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// AddStar implements K8sClients.
|
||||
func (k *k8sClients) AddStar(c *contextmodel.ReqContext, uid string) error {
|
||||
dyn, err := kubernetes.NewForConfig(k.configProvider.GetDirectRestConfig(c))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := c.Req.Context()
|
||||
user, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns := k.namespacer(c.OrgID)
|
||||
|
||||
client := dyn.RESTClient()
|
||||
rsp := client.Put().AbsPath(
|
||||
"apis", preferencesV1.APIGroup, preferencesV1.APIVersion, "namespaces", ns,
|
||||
"stars", "user-"+user.GetIdentifier(),
|
||||
"update", "history.grafana.app", "Query", uid,
|
||||
).Do(ctx)
|
||||
|
||||
return rsp.Error()
|
||||
}
|
||||
|
||||
// RemoveStar implements K8sClients.
|
||||
func (k *k8sClients) RemoveStar(c *contextmodel.ReqContext, uid string) error {
|
||||
dyn, err := kubernetes.NewForConfig(k.configProvider.GetDirectRestConfig(c))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx := c.Req.Context()
|
||||
user, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ns := k.namespacer(c.OrgID)
|
||||
|
||||
client := dyn.RESTClient()
|
||||
rsp := client.Delete().AbsPath(
|
||||
"apis", preferencesV1.APIGroup, preferencesV1.APIVersion, "namespaces", ns,
|
||||
"stars", "user-"+user.GetIdentifier(),
|
||||
"update", "history.grafana.app", "Query", uid,
|
||||
).Do(ctx)
|
||||
|
||||
return rsp.Error()
|
||||
}
|
|
@ -8,11 +8,20 @@ import (
|
|||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
ac "github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/user"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
|
||||
func ProvideService(cfg *setting.Cfg, sqlStore db.DB, routeRegister routing.RouteRegister, accessControl ac.AccessControl) *QueryHistoryService {
|
||||
func ProvideService(cfg *setting.Cfg,
|
||||
sqlStore db.DB,
|
||||
routeRegister routing.RouteRegister,
|
||||
accessControl ac.AccessControl,
|
||||
features featuremgmt.FeatureToggles,
|
||||
configProvider apiserver.DirectRestConfigProvider,
|
||||
) *QueryHistoryService {
|
||||
s := &QueryHistoryService{
|
||||
store: sqlStore,
|
||||
Cfg: cfg,
|
||||
|
@ -24,6 +33,12 @@ func ProvideService(cfg *setting.Cfg, sqlStore db.DB, routeRegister routing.Rout
|
|||
|
||||
// Register routes only when query history is enabled
|
||||
if s.Cfg.QueryHistoryEnabled {
|
||||
if features.IsEnabledGlobally(featuremgmt.FlagKubernetesStars) {
|
||||
s.k8sClients = &k8sClients{
|
||||
namespacer: request.GetNamespaceMapper(s.Cfg),
|
||||
configProvider: configProvider,
|
||||
}
|
||||
}
|
||||
s.registerAPIEndpoints()
|
||||
}
|
||||
|
||||
|
@ -48,6 +63,7 @@ type QueryHistoryService struct {
|
|||
log log.Logger
|
||||
now func() time.Time
|
||||
accessControl ac.AccessControl
|
||||
k8sClients *k8sClients
|
||||
}
|
||||
|
||||
func (s QueryHistoryService) CreateQueryInQueryHistory(ctx context.Context, user *user.SignedInUser, cmd CreateQueryInQueryHistoryCommand) (QueryHistoryDTO, error) {
|
||||
|
|
|
@ -170,9 +170,9 @@ func (s *server) BulkProcess(stream resourcepb.BulkStore_BulkProcessServer) erro
|
|||
})
|
||||
}
|
||||
|
||||
// Verify all request keys are valid
|
||||
// Verify all collection request keys are valid
|
||||
for _, k := range settings.Collection {
|
||||
if r := verifyRequestKey(k); r != nil {
|
||||
if r := verifyRequestKeyCollection(k); r != nil {
|
||||
return sendAndClose(&resourcepb.BulkResponse{
|
||||
Error: &resourcepb.ErrorResult{
|
||||
Message: fmt.Sprintf("invalid request key: %s", r.Message),
|
||||
|
|
|
@ -8,7 +8,24 @@ import (
|
|||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
)
|
||||
|
||||
// verifyRequestKey verifies that the key is valid for a request (all fields set and valid, including name)
|
||||
func verifyRequestKey(key *resourcepb.ResourceKey) *resourcepb.ErrorResult {
|
||||
if err := verifyRequestKeyNamespaceGroupResource(key); err != nil {
|
||||
return NewBadRequestError(err.Message)
|
||||
}
|
||||
if err := validation.IsValidGrafanaName(key.Name); err != nil {
|
||||
return NewBadRequestError(err[0])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyRequestKeyCollection verifies that the key is valid for a collection (namespace/group/resource set and valid)
|
||||
func verifyRequestKeyCollection(key *resourcepb.ResourceKey) *resourcepb.ErrorResult {
|
||||
return verifyRequestKeyNamespaceGroupResource(key)
|
||||
}
|
||||
|
||||
// verifyRequestKeyNamespaceGroupResource verifies that the key has namespace/group/resource set and valid
|
||||
func verifyRequestKeyNamespaceGroupResource(key *resourcepb.ResourceKey) *resourcepb.ErrorResult {
|
||||
if key == nil {
|
||||
return NewBadRequestError("missing resource key")
|
||||
}
|
||||
|
@ -27,9 +44,6 @@ func verifyRequestKey(key *resourcepb.ResourceKey) *resourcepb.ErrorResult {
|
|||
if err := validation.IsValidateResource(key.Resource); err != nil {
|
||||
return NewBadRequestError(err[0])
|
||||
}
|
||||
if err := validation.IsValidGrafanaName(key.Name); err != nil {
|
||||
return NewBadRequestError(err[0])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue