Merge branch 'main' of github.com:grafana/grafana into konrad-poc/k8s-rules-api-test
CodeQL checks / Detect whether code changed (push) Waiting to run Details
CodeQL checks / Analyze (actions) (push) Blocked by required conditions Details
CodeQL checks / Analyze (go) (push) Blocked by required conditions Details
CodeQL checks / Analyze (javascript) (push) Blocked by required conditions Details

This commit is contained in:
Konrad Lalik 2025-10-07 15:30:03 +02:00
commit 37fc31e3f1
No known key found for this signature in database
239 changed files with 12003 additions and 1324 deletions

2
.github/CODEOWNERS vendored
View File

@ -88,6 +88,7 @@
/apps/preferences/ @grafana/grafana-app-platform-squad @grafana/grafana-frontend-platform /apps/preferences/ @grafana/grafana-app-platform-squad @grafana/grafana-frontend-platform
/apps/shorturl/ @grafana/sharing-squad /apps/shorturl/ @grafana/sharing-squad
/apps/secret/ @grafana/grafana-operator-experience-squad /apps/secret/ @grafana/grafana-operator-experience-squad
/apps/scope/ @grafana/grafana-operator-experience-squad
/apps/investigations/ @fcjack @matryer @svennergr /apps/investigations/ @fcjack @matryer @svennergr
/apps/advisor/ @grafana/plugins-platform-backend /apps/advisor/ @grafana/plugins-platform-backend
/apps/iam/ @grafana/access-squad /apps/iam/ @grafana/access-squad
@ -629,6 +630,7 @@
/packages/grafana-runtime/rollup.config.ts @grafana/grafana-frontend-platform /packages/grafana-runtime/rollup.config.ts @grafana/grafana-frontend-platform
/packages/grafana-runtime/src/index.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend /packages/grafana-runtime/src/index.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend
/packages/grafana-runtime/src/internal/index.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend /packages/grafana-runtime/src/internal/index.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend
/packages/grafana-runtime/src/internal/openFeature @grafana/grafana-frontend-platform
/packages/grafana-runtime/src/unstable.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend /packages/grafana-runtime/src/unstable.ts @grafana/grafana-frontend-platform @grafana/plugins-platform-frontend
/packages/grafana-runtime/tsconfig.build.json @grafana/grafana-frontend-platform /packages/grafana-runtime/tsconfig.build.json @grafana/grafana-frontend-platform
/packages/grafana-runtime/tsconfig.json @grafana/grafana-frontend-platform /packages/grafana-runtime/tsconfig.json @grafana/grafana-frontend-platform

View File

@ -99,6 +99,7 @@ COPY apps/correlations apps/correlations
COPY apps/preferences apps/preferences COPY apps/preferences apps/preferences
COPY apps/provisioning apps/provisioning COPY apps/provisioning apps/provisioning
COPY apps/secret apps/secret COPY apps/secret apps/secret
COPY apps/scope apps/scope
COPY apps/investigations apps/investigations COPY apps/investigations apps/investigations
COPY apps/advisor apps/advisor COPY apps/advisor apps/advisor
COPY apps/dashboard apps/dashboard COPY apps/dashboard apps/dashboard

View File

@ -198,7 +198,7 @@ func sortPanelsByGridPos(dashboard map[string]interface{}) {
return return
} }
sort.Slice(panels, func(i, j int) bool { sort.SliceStable(panels, func(i, j int) bool {
panelA := panels[i] panelA := panels[i]
panelB := panels[j] panelB := panels[j]
@ -831,7 +831,7 @@ func cleanupPanelList(panels []interface{}) {
// sortPanelsByGridPosition sorts panels by grid position (matches frontend sortPanelsByGridPos behavior) // sortPanelsByGridPosition sorts panels by grid position (matches frontend sortPanelsByGridPos behavior)
func sortPanelsByGridPosition(panels []interface{}) { func sortPanelsByGridPosition(panels []interface{}) {
sort.Slice(panels, func(i, j int) bool { sort.SliceStable(panels, func(i, j int) bool {
panelA, okA := panels[i].(map[string]interface{}) panelA, okA := panels[i].(map[string]interface{})
panelB, okB := panels[j].(map[string]interface{}) panelB, okB := panels[j].(map[string]interface{})
if !okA || !okB { if !okA || !okB {

View File

@ -49,10 +49,15 @@ func upgradeToGridLayout(dashboard map[string]interface{}) {
maxPanelID := getMaxPanelID(rows) maxPanelID := getMaxPanelID(rows)
nextRowID := maxPanelID + 1 nextRowID := maxPanelID + 1
// Get existing panels // Match frontend: dashboard.panels already exists with top-level panels
var finalPanels []interface{} // The frontend's this.dashboard.panels is initialized in the constructor with existing panels
if existingPanels, ok := dashboard["panels"].([]interface{}); ok { // Then upgradeToGridLayout adds more panels to it
finalPanels = existingPanels
// Initialize panels array - make a copy to avoid modifying the original
panels := []interface{}{}
if existingPanels, ok := dashboard["panels"].([]interface{}); ok && len(existingPanels) > 0 {
// Copy existing panels to preserve order
panels = append(panels, existingPanels...)
} }
// Add special "row" panels if even one row is collapsed, repeated or has visible title (line 1028 in TS) // Add special "row" panels if even one row is collapsed, repeated or has visible title (line 1028 in TS)
@ -72,7 +77,14 @@ func upgradeToGridLayout(dashboard map[string]interface{}) {
height := getRowHeight(row) height := getRowHeight(row)
rowGridHeight := getGridHeight(height) rowGridHeight := getGridHeight(height)
isCollapsed := GetBoolValue(row, "collapse") // Check if collapse property exists and get its value
collapseValue, hasCollapseProperty := row["collapse"]
isCollapsed := false
if hasCollapseProperty {
if b, ok := collapseValue.(bool); ok {
isCollapsed = b
}
}
var rowPanel map[string]interface{} var rowPanel map[string]interface{}
@ -110,9 +122,9 @@ func upgradeToGridLayout(dashboard map[string]interface{}) {
}, },
} }
// Set collapsed property only if the original row had a collapse property // Match frontend behavior: rowPanel.collapsed = row.collapse (line 1065 in TS)
// This matches the frontend behavior: rowPanel.collapsed = row.collapse // Only set collapsed property if the original row had a collapse property
if _, hasCollapse := row["collapse"]; hasCollapse { if hasCollapseProperty {
rowPanel["collapsed"] = isCollapsed rowPanel["collapsed"] = isCollapsed
} }
nextRowID++ nextRowID++
@ -128,20 +140,14 @@ func upgradeToGridLayout(dashboard map[string]interface{}) {
continue continue
} }
// Check if panel already has gridPos but no valid span // Match frontend logic: panel.span = panel.span || DEFAULT_PANEL_SPAN (line 1082 in TS)
// If span is missing or zero, and gridPos exists, preserve gridPos dimensions
var panelWidth, panelHeight int
span := GetFloatValue(panel, "span", 0) span := GetFloatValue(panel, "span", 0)
existingGridPos, hasGridPos := panel["gridPos"].(map[string]interface{}) if span == 0 {
span = defaultPanelSpan
if hasGridPos && span == 0 {
// Panel already has gridPos but no valid span - preserve its dimensions
panelWidth = GetIntValue(existingGridPos, "w", int(defaultPanelSpan*widthFactor))
panelHeight = GetIntValue(existingGridPos, "h", rowGridHeight)
} else {
panelWidth, panelHeight = calculatePanelDimensionsFromSpan(span, panel, widthFactor, rowGridHeight)
} }
panelWidth, panelHeight := calculatePanelDimensionsFromSpan(span, panel, widthFactor, rowGridHeight)
panelPos := rowArea.getPanelPosition(panelHeight, panelWidth) panelPos := rowArea.getPanelPosition(panelHeight, panelWidth)
yPos = rowArea.yPos yPos = rowArea.yPos
@ -157,21 +163,21 @@ func upgradeToGridLayout(dashboard map[string]interface{}) {
// Remove span (line 1080 in TS) // Remove span (line 1080 in TS)
delete(panel, "span") delete(panel, "span")
// Exact logic from lines 1082-1086 in TS // Match frontend logic: lines 1101-1105 in TS
if rowPanel != nil && isCollapsed { if rowPanel != nil && isCollapsed {
// Add to collapsed row's nested panels // Add to collapsed row's nested panels (line 1102)
if rowPanelPanels, ok := rowPanel["panels"].([]interface{}); ok { if rowPanelPanels, ok := rowPanel["panels"].([]interface{}); ok {
rowPanel["panels"] = append(rowPanelPanels, panel) rowPanel["panels"] = append(rowPanelPanels, panel)
} }
} else { } else {
// Add directly to dashboard panels // Add directly to panels array like frontend (line 1104)
finalPanels = append(finalPanels, panel) panels = append(panels, panel)
} }
} }
// Add row panel after processing all panels (lines 1089-1091 in TS) // Add row panel after regular panels from this row (lines 1108-1110 in TS)
if rowPanel != nil { if rowPanel != nil {
finalPanels = append(finalPanels, rowPanel) panels = append(panels, rowPanel)
} }
// Update yPos (lines 1093-1095 in TS) // Update yPos (lines 1093-1095 in TS)
@ -181,7 +187,7 @@ func upgradeToGridLayout(dashboard map[string]interface{}) {
} }
// Update the dashboard // Update the dashboard
dashboard["panels"] = finalPanels dashboard["panels"] = panels
delete(dashboard, "rows") delete(dashboard, "rows")
} }
@ -313,10 +319,7 @@ func getGridHeight(height float64) int {
} }
func calculatePanelDimensionsFromSpan(span float64, panel map[string]interface{}, widthFactor float64, defaultHeight int) (int, int) { func calculatePanelDimensionsFromSpan(span float64, panel map[string]interface{}, widthFactor float64, defaultHeight int) (int, int) {
// Set default span if still 0 // span should already be normalized by caller (line 1082 in DashboardMigrator.ts)
if span == 0 {
span = defaultPanelSpan
}
if minSpan, hasMinSpan := panel["minSpan"]; hasMinSpan { if minSpan, hasMinSpan := panel["minSpan"]; hasMinSpan {
if minSpanFloat, ok := ConvertToFloat(minSpan); ok && minSpanFloat > 0 { if minSpanFloat, ok := ConvertToFloat(minSpan); ok && minSpanFloat > 0 {

View File

@ -1532,6 +1532,123 @@ func TestV16(t *testing.T) {
// rows field should be removed // rows field should be removed
}, },
}, },
{
name: "should handle span zero by defaulting to DEFAULT_PANEL_SPAN",
input: map[string]interface{}{
"schemaVersion": 15,
"rows": []interface{}{
map[string]interface{}{
"collapse": false,
"showTitle": true, // Need this to create row panel
"title": "Test Row",
"height": 250,
"panels": []interface{}{
map[string]interface{}{
"id": 1,
"type": "graph",
"span": 0, // This should be defaulted to 4 (DEFAULT_PANEL_SPAN)
},
map[string]interface{}{
"id": 2,
"type": "stat",
"span": 6, // Normal span value
},
},
},
},
},
expected: map[string]interface{}{
"schemaVersion": 16,
"panels": []interface{}{
map[string]interface{}{
"id": 1,
"type": "graph",
"gridPos": map[string]interface{}{
"x": 0,
"y": 1,
"w": 8, // span 0 -> DEFAULT_PANEL_SPAN (4) -> 4 * 2 = 8 width
"h": 7, // default height
},
},
map[string]interface{}{
"id": 2,
"type": "stat",
"gridPos": map[string]interface{}{
"x": 8, // After first panel
"y": 1,
"w": 12, // span 6 -> 6 * 2 = 12 width
"h": 7, // default height
},
},
// Row panel should be created because showTitle is true
map[string]interface{}{
"id": 3,
"type": "row",
"title": "Test Row",
"collapsed": false, // Set because input has "collapse": false
"repeat": "",
"panels": []interface{}{},
"gridPos": map[string]interface{}{
"x": 0,
"y": 0,
"w": 24,
"h": 7,
},
},
},
},
},
{
name: "should not set collapsed property when input row has no collapse property",
input: map[string]interface{}{
"schemaVersion": 15,
"rows": []interface{}{
map[string]interface{}{
// No "collapse" property in input
"showTitle": true,
"title": "Test Row",
"height": 250,
"panels": []interface{}{
map[string]interface{}{
"id": 1,
"type": "graph",
"span": 12,
},
},
},
},
},
expected: map[string]interface{}{
"schemaVersion": 16,
"panels": []interface{}{
map[string]interface{}{
"id": 1,
"type": "graph",
"gridPos": map[string]interface{}{
"x": 0,
"y": 1,
"w": 24, // span 12 -> 12 * 2 = 24 width
"h": 7, // default height
},
},
// Row panel should be created because showTitle is true
map[string]interface{}{
"id": 2,
"type": "row",
"title": "Test Row",
// No "collapsed" property because input had no "collapse" property
"repeat": "",
"panels": []interface{}{},
"gridPos": map[string]interface{}{
"x": 0,
"y": 0,
"w": 24,
"h": 7,
},
},
},
},
},
} }
runMigrationTests(t, tests, schemaversion.V16) runMigrationTests(t, tests, schemaversion.V16)

View File

@ -0,0 +1,687 @@
{
"__requires": [
{
"id": "grafana",
"name": "Grafana",
"type": "grafana",
"version": "8.0.0"
}
],
"annotations": {
"list": []
},
"editable": false,
"gnetId": null,
"graphTooltip": 0,
"hideControls": false,
"links": [
{
"icon": "external link",
"targetBlank": true,
"title": "External Documentation",
"type": "link",
"url": "https://example.com/docs"
}
],
"panels": [
{
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 0
},
"options": {
"content": "This dashboard demonstrates various monitoring components for application observability and performance metrics.\n",
"mode": "markdown"
},
"title": "Application Monitoring",
"type": "text"
}
],
"refresh": "10s",
"rows": [
{
"collapse": false,
"collapsed": false,
"height": "250px",
"panels": [
{
"gridPos": {
"h": 11,
"w": 24,
"x": 0,
"y": 5
},
"id": 6,
"options": {
"content": "This service handles background processing tasks for the application system. It manages various types of operations including data synchronization, resource management, and batch processing.\n\nSupported operation types:\n1. Sync: Synchronizes data between different systems\n2. Process: Handles batch data processing tasks\n3. Cleanup: Removes outdated or temporary resources\n4. Update: Applies configuration changes across services\n\nService dependencies:\n- Data API: For reading and writing application data\n- Configuration Service: For managing system settings\n- Queue Service: For handling task scheduling\n- Storage Service: For persistent data management\n- Auth Service: For authentication and authorization\n- Metrics Service: For collecting operational statistics\n",
"mode": "markdown"
},
"span": 0,
"title": "Service Overview",
"type": "text"
},
{
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 16
},
"id": 7,
"options": {
"content": "Error monitoring helps identify issues in the system. This section displays error logs and success rates for operations.",
"mode": "markdown"
},
"span": 0,
"title": "Error Monitoring",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": 0
},
{
"color": "yellow",
"value": 0.95
},
{
"color": "green",
"value": 1
}
]
},
"unit": "percentunit"
}
},
"gridPos": {
"h": 9,
"w": 3,
"x": 0,
"y": 19
},
"id": 8,
"span": 0,
"targets": [
{
"expr": "sum by (action) (app_jobs_processed_total{outcome=\"success\", cluster=\"$cluster\", namespace=\"default\"})\n/\nsum by (action) (app_jobs_processed_total{cluster=\"$cluster\", namespace=\"default\"})\n",
"legendFormat": "{{action}}"
}
],
"title": "Job Success Rate",
"type": "stat"
},
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"gridPos": {
"h": 9,
"w": 10,
"x": 3,
"y": 19
},
"id": 9,
"options": {
"enableLogDetails": true,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"span": 0,
"targets": [
{
"expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt | level=\"error\""
}
],
"title": "Errors",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"gridPos": {
"h": 9,
"w": 11,
"x": 13,
"y": 19
},
"id": 10,
"options": {
"enableLogDetails": true,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"span": 0,
"targets": [
{
"expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt"
}
],
"title": "All",
"type": "logs"
},
{
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 28
},
"id": 11,
"options": {
"content": "Performance monitoring examines factors that affect system response times, including operation duration, queue lengths, and processing delays. This section provides metrics and traces for performance analysis.\n",
"mode": "markdown"
},
"span": 0,
"title": "Performance Analysis",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Number of concurrent processing threads available for handling operations",
"gridPos": {
"h": 6,
"w": 5,
"x": 0,
"y": 31
},
"id": 12,
"span": 0,
"targets": [
{
"expr": "max(app_worker_threads_active{cluster=\"$cluster\", namespace=\"default\"})",
"instant": true
}
],
"title": "Concurrent Job Drivers",
"type": "stat"
},
{
"datasource": {
"type": "tempo",
"uid": "${tempo}"
},
"gridPos": {
"h": 6,
"w": 19,
"x": 5,
"y": 31
},
"id": 13,
"span": 0,
"targets": [
{
"filters": [
{
"id": "span-name",
"operator": "=",
"scope": "span",
"tag": "name",
"value": [
"provisioning.sync.process"
]
},
{
"id": "k8s-cluster-name",
"operator": "=",
"scope": "resource",
"tag": "k8s.cluster.name",
"value": [
"$cluster"
]
}
],
"query": "{name=\"app.operation.process\"}",
"queryType": "traceqlSearch"
}
],
"title": "Recent Operation Traces",
"type": "table"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed",
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "yellow",
"value": 2
},
{
"color": "red",
"value": 5
}
]
},
"unit": "s"
}
},
"gridPos": {
"h": 10,
"w": 8,
"x": 0,
"y": 55
},
"id": 14,
"span": 0,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0",
"legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}",
"refId": "B"
},
{
"expr": "histogram_quantile(0.9, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0",
"legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0",
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}",
"refId": "D"
},
{
"expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) > 0",
"legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}",
"refId": "E"
}
],
"timeFrom": "7d",
"title": "7d avg of job durations",
"transformations": [
{
"id": "reduce",
"options": {
"mode": "seriesToRows",
"reducers": [
"mean"
]
}
},
{
"id": "seriesToRows"
},
{
"id": "organize",
"options": {
"renameByName": {
"Field": "Type",
"Mean": "Avg Duration",
"Metric": "Legend",
"Value": "Duration"
}
}
}
],
"type": "table"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed",
"gridPos": {
"h": 10,
"w": 16,
"x": 8,
"y": 55
},
"id": 15,
"span": 0,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}",
"refId": "B"
},
{
"expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}",
"refId": "D"
},
{
"expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}",
"refId": "E"
}
],
"title": "Job Duration",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Total number of jobs waiting to be processed",
"gridPos": {
"h": 5,
"w": 4,
"x": 0,
"y": 65
},
"id": 16,
"span": 0,
"targets": [
{
"expr": "clamp_min(sum(app_operation_queue_size{cluster=\"$cluster\", namespace=\"default\"}), 0)",
"legendFormat": "Queue size"
}
],
"title": "Queue Size",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"fieldConfig": {
"defaults": {
"unit": "s"
}
},
"gridPos": {
"h": 5,
"w": 4,
"x": 4,
"y": 65
},
"id": 17,
"span": 0,
"targets": [
{
"expr": "avg(histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le)))",
"legendFormat": "Queue size"
}
],
"timeFrom": "7d",
"title": "7d avg Queue Wait Time",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "How long a job is in the queue before being picked up",
"gridPos": {
"h": 5,
"w": 16,
"x": 8,
"y": 65
},
"id": 18,
"span": 0,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.99",
"refId": "B"
},
{
"expr": "histogram_quantile(0.95, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.95",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.5",
"refId": "D"
},
{
"expr": "histogram_quantile(0.1, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.1",
"refId": "E"
}
],
"title": "Queue Wait Time",
"type": "timeseries"
},
{
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 52
},
"id": 19,
"options": {
"content": "Resource utilization monitoring for application containers",
"mode": "markdown"
},
"span": 0,
"title": "Resource Monitoring",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 9,
"w": 7,
"x": 0,
"y": 55
},
"id": 20,
"span": 0,
"targets": [
{
"expr": "count by (cluster, channel)(label_replace(label_replace(kube_pod_container_info{namespace=\"default\", container=\"app-worker\", pod=~\"app-worker.*\", cluster=~\"$cluster\"}, \"version\", \"$1\", \"image\", \".+:(.+)\"), \"channel\", \"$1\", \"container\", \".+-(.+)\"))",
"legendFormat": "{{cluster}}"
}
],
"title": "Running Pod(s)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 9,
"w": 8,
"x": 7,
"y": 55
},
"id": 21,
"span": 0,
"targets": [
{
"expr": "max(kube_pod_container_resource_requests{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})",
"legendFormat": "Memory Request"
},
{
"expr": "max(kube_pod_container_resource_limits{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})",
"legendFormat": "Memory Limit"
},
{
"expr": "max(container_memory_usage_bytes{namespace=\"default\",cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"}) by (pod)",
"legendFormat": "Container usage {{pod}}"
}
],
"title": "Memory Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 9,
"w": 9,
"x": 15,
"y": 55
},
"id": 22,
"span": 0,
"targets": [
{
"expr": "sum(irate(container_cpu_usage_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container, cpu)",
"legendFormat": "Usage {{pod}}"
},
{
"expr": "sum(irate(container_cpu_cfs_throttled_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container)",
"legendFormat": "Throttling {{pod}}"
},
{
"expr": "max(kube_pod_container_resource_limits{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})",
"legendFormat": "CPU limit"
},
{
"expr": "max(kube_pod_container_resource_requests{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})",
"legendFormat": "CPU request"
}
],
"title": "CPU Utilization",
"type": "timeseries"
}
],
"repeat": null,
"repeatIteration": null,
"repeatRowId": null,
"showTitle": true,
"title": "Application Service",
"titleSize": "h6"
}
],
"schemaVersion": 15,
"style": "dark",
"tags": [
"as-code"
],
"templating": {
"list": [
{
"current": {
"value": "prometheus-datasource"
},
"hide": 0,
"label": "Data source",
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"value": "prometheus-datasource"
},
"name": "prom",
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"value": "loki-datasource"
},
"name": "loki",
"query": "loki",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"text": "tempo-datasource",
"value": "tempo-datasource"
},
"name": "tempo",
"query": "tempo",
"refresh": 1,
"regex": ".*tempo.*",
"type": "datasource"
},
{
"current": {
"text": "demo-cluster",
"value": "demo-cluster"
},
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"name": "cluster",
"query": "label_values(app_worker_threads_active,cluster)",
"refresh": 1,
"type": "query"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "utc",
"title": "Span Zero Demo Dashboard",
"uid": "span-zero-demo-dashboard",
"version": 0
}

View File

@ -0,0 +1,881 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": false,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [
{
"icon": "external link",
"targetBlank": true,
"title": "External Documentation",
"type": "link",
"url": "https://example.com/docs"
}
],
"panels": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"content": "This dashboard demonstrates various monitoring components for application observability and performance metrics.\n",
"mode": "markdown"
},
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Application Monitoring",
"type": "text"
},
{
"collapsed": false,
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 0
},
"id": 23,
"panels": [],
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Application Service",
"type": "row"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 1
},
"id": 6,
"options": {
"content": "This service handles background processing tasks for the application system. It manages various types of operations including data synchronization, resource management, and batch processing.\n\nSupported operation types:\n1. Sync: Synchronizes data between different systems\n2. Process: Handles batch data processing tasks\n3. Cleanup: Removes outdated or temporary resources\n4. Update: Applies configuration changes across services\n\nService dependencies:\n- Data API: For reading and writing application data\n- Configuration Service: For managing system settings\n- Queue Service: For handling task scheduling\n- Storage Service: For persistent data management\n- Auth Service: For authentication and authorization\n- Metrics Service: For collecting operational statistics\n",
"mode": "markdown"
},
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Service Overview",
"type": "text"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 1
},
"id": 7,
"options": {
"content": "Error monitoring helps identify issues in the system. This section displays error logs and success rates for operations.",
"mode": "markdown"
},
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Error Monitoring",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": 0
},
{
"color": "yellow",
"value": 0.95
},
{
"color": "green",
"value": 1
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 1
},
"id": 8,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "sum by (action) (app_jobs_processed_total{outcome=\"success\", cluster=\"$cluster\", namespace=\"default\"})\n/\nsum by (action) (app_jobs_processed_total{cluster=\"$cluster\", namespace=\"default\"})\n",
"legendFormat": "{{action}}",
"refId": "A"
}
],
"title": "Job Success Rate",
"type": "stat"
},
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 8
},
"id": 9,
"options": {
"enableLogDetails": true,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt | level=\"error\"",
"refId": "A"
}
],
"title": "Errors",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 8
},
"id": 10,
"options": {
"enableLogDetails": true,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt",
"refId": "A"
}
],
"title": "All",
"type": "logs"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 8
},
"id": 11,
"options": {
"content": "Performance monitoring examines factors that affect system response times, including operation duration, queue lengths, and processing delays. This section provides metrics and traces for performance analysis.\n",
"mode": "markdown"
},
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Performance Analysis",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Number of concurrent processing threads available for handling operations",
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 15
},
"id": 12,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "max(app_worker_threads_active{cluster=\"$cluster\", namespace=\"default\"})",
"instant": true,
"refId": "A"
}
],
"title": "Concurrent Job Drivers",
"type": "stat"
},
{
"datasource": {
"type": "tempo",
"uid": "${tempo}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 15
},
"id": 13,
"targets": [
{
"datasource": {
"type": "tempo",
"uid": "${tempo}"
},
"filters": [
{
"id": "span-name",
"operator": "=",
"scope": "span",
"tag": "name",
"value": [
"app.operation.process"
]
},
{
"id": "k8s-cluster-name",
"operator": "=",
"scope": "resource",
"tag": "k8s.cluster.name",
"value": [
"$cluster"
]
}
],
"query": "{name=\"app.operation.process\"}",
"queryType": "traceqlSearch",
"refId": "A"
}
],
"title": "Recent Operation Traces",
"type": "table"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed",
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "yellow",
"value": 2
},
{
"color": "red",
"value": 5
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 15
},
"id": 14,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}",
"refId": "C"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}",
"refId": "D"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}",
"refId": "E"
}
],
"timeFrom": "7d",
"title": "7d avg of job durations",
"transformations": [
{
"id": "reduce",
"options": {
"mode": "seriesToRows",
"reducers": [
"mean"
]
}
},
{
"id": "seriesToRows"
},
{
"id": "organize",
"options": {
"renameByName": {
"Field": "Type",
"Mean": "Avg Duration",
"Metric": "Legend",
"Value": "Duration"
}
}
}
],
"type": "table"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed",
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 22
},
"id": 15,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}",
"refId": "C"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}",
"refId": "D"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}",
"refId": "E"
}
],
"title": "Job Duration",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Total number of jobs waiting to be processed",
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 22
},
"id": 16,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "clamp_min(sum(app_operation_queue_size{cluster=\"$cluster\", namespace=\"default\"}), 0)",
"legendFormat": "Queue size",
"refId": "A"
}
],
"title": "Queue Size",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"fieldConfig": {
"defaults": {
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 22
},
"id": 17,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "avg(histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le)))",
"legendFormat": "Queue wait",
"refId": "A"
}
],
"timeFrom": "7d",
"title": "7d avg Queue Wait Time",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "How long a job is in the queue before being picked up",
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 29
},
"id": 18,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.99, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.99",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.95, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.95",
"refId": "C"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.5",
"refId": "D"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "histogram_quantile(0.1, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.1",
"refId": "E"
}
],
"title": "Queue Wait Time",
"type": "timeseries"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 29
},
"id": 19,
"options": {
"content": "Resource utilization monitoring for application containers",
"mode": "markdown"
},
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A"
}
],
"title": "Resource Monitoring",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 29
},
"id": 20,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "count by (cluster, channel)(label_replace(label_replace(kube_pod_container_info{namespace=\"default\", container=\"app-worker\", pod=~\"app-worker.*\", cluster=~\"$cluster\"}, \"version\", \"$1\", \"image\", \".+:(.+)\"), \"channel\", \"$1\", \"container\", \".+-(.+)\"))",
"legendFormat": "{{cluster}}",
"refId": "A"
}
],
"title": "Running Pod(s)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 36
},
"id": 21,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "max(kube_pod_container_resource_requests{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})",
"legendFormat": "Memory Request",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "max(kube_pod_container_resource_limits{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})",
"legendFormat": "Memory Limit",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "max(container_memory_usage_bytes{namespace=\"default\",cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"}) by (pod)",
"legendFormat": "Container usage {{pod}}",
"refId": "C"
}
],
"title": "Memory Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 36
},
"id": 22,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "sum(irate(container_cpu_usage_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container, cpu)",
"legendFormat": "Usage {{pod}}",
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "sum(irate(container_cpu_cfs_throttled_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container)",
"legendFormat": "Throttling {{pod}}",
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "max(kube_pod_container_resource_limits{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})",
"legendFormat": "CPU limit",
"refId": "C"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"expr": "max(kube_pod_container_resource_requests{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})",
"legendFormat": "CPU request",
"refId": "D"
}
],
"title": "CPU Utilization",
"type": "timeseries"
}
],
"refresh": "10s",
"schemaVersion": 42,
"tags": [
"as-code"
],
"templating": {
"list": [
{
"current": {
"value": "prometheus-datasource"
},
"hide": 0,
"label": "Data source",
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"value": "prometheus-datasource"
},
"name": "prom",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"value": "loki-datasource"
},
"name": "loki",
"options": [],
"query": "loki",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"text": "tempo-datasource",
"value": "tempo-datasource"
},
"name": "tempo",
"options": [],
"query": "tempo",
"refresh": 1,
"regex": ".*tempo.*",
"type": "datasource"
},
{
"current": {
"text": "demo-cluster",
"value": "demo-cluster"
},
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"name": "cluster",
"options": [],
"query": "label_values(app_worker_threads_active,cluster)",
"refresh": 1,
"type": "query"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "utc",
"title": "Span Zero Demo Dashboard",
"uid": "span-zero-demo-dashboard",
"weekStart": ""
}

View File

@ -0,0 +1,694 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": false,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [
{
"icon": "external link",
"targetBlank": true,
"title": "External Documentation",
"type": "link",
"url": "https://example.com/docs"
}
],
"panels": [
{
"gridPos": {
"h": 3,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"content": "This dashboard demonstrates various monitoring components for application observability and performance metrics.\n",
"mode": "markdown"
},
"title": "Application Monitoring",
"type": "text"
},
{
"collapsed": false,
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
"y": 0
},
"id": 23,
"panels": [],
"title": "Application Service",
"type": "row"
},
{
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 1
},
"id": 6,
"options": {
"content": "This service handles background processing tasks for the application system. It manages various types of operations including data synchronization, resource management, and batch processing.\n\nSupported operation types:\n1. Sync: Synchronizes data between different systems\n2. Process: Handles batch data processing tasks\n3. Cleanup: Removes outdated or temporary resources\n4. Update: Applies configuration changes across services\n\nService dependencies:\n- Data API: For reading and writing application data\n- Configuration Service: For managing system settings\n- Queue Service: For handling task scheduling\n- Storage Service: For persistent data management\n- Auth Service: For authentication and authorization\n- Metrics Service: For collecting operational statistics\n",
"mode": "markdown"
},
"title": "Service Overview",
"type": "text"
},
{
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 1
},
"id": 7,
"options": {
"content": "Error monitoring helps identify issues in the system. This section displays error logs and success rates for operations.",
"mode": "markdown"
},
"title": "Error Monitoring",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": 0
},
{
"color": "yellow",
"value": 0.95
},
{
"color": "green",
"value": 1
}
]
},
"unit": "percentunit"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 1
},
"id": 8,
"targets": [
{
"expr": "sum by (action) (app_jobs_processed_total{outcome=\"success\", cluster=\"$cluster\", namespace=\"default\"})\n/\nsum by (action) (app_jobs_processed_total{cluster=\"$cluster\", namespace=\"default\"})\n",
"legendFormat": "{{action}}",
"refId": "A"
}
],
"title": "Job Success Rate",
"type": "stat"
},
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 8
},
"id": 9,
"options": {
"enableLogDetails": true,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"targets": [
{
"expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt | level=\"error\"",
"refId": "A"
}
],
"title": "Errors",
"type": "logs"
},
{
"datasource": {
"type": "loki",
"uid": "${loki}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 8
},
"id": 10,
"options": {
"enableLogDetails": true,
"showTime": false,
"sortOrder": "Descending",
"wrapLogMessage": true
},
"targets": [
{
"expr": "{namespace=\"default\", cluster=\"$cluster\", job=\"app-service\"} | logfmt",
"refId": "A"
}
],
"title": "All",
"type": "logs"
},
{
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 8
},
"id": 11,
"options": {
"content": "Performance monitoring examines factors that affect system response times, including operation duration, queue lengths, and processing delays. This section provides metrics and traces for performance analysis.\n",
"mode": "markdown"
},
"title": "Performance Analysis",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Number of concurrent processing threads available for handling operations",
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 15
},
"id": 12,
"targets": [
{
"expr": "max(app_worker_threads_active{cluster=\"$cluster\", namespace=\"default\"})",
"instant": true,
"refId": "A"
}
],
"title": "Concurrent Job Drivers",
"type": "stat"
},
{
"datasource": {
"type": "tempo",
"uid": "${tempo}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 15
},
"id": 13,
"targets": [
{
"filters": [
{
"id": "span-name",
"operator": "=",
"scope": "span",
"tag": "name",
"value": [
"app.operation.process"
]
},
{
"id": "k8s-cluster-name",
"operator": "=",
"scope": "resource",
"tag": "k8s.cluster.name",
"value": [
"$cluster"
]
}
],
"query": "{name=\"app.operation.process\"}",
"queryType": "traceqlSearch",
"refId": "A"
}
],
"title": "Recent Operation Traces",
"type": "table"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed",
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "yellow",
"value": 2
},
{
"color": "red",
"value": 5
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 15
},
"id": 14,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}",
"refId": "B"
},
{
"expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}",
"refId": "D"
},
{
"expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le, resources_changed_bucket, action)) and on(resources_changed_bucket, action) sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (resources_changed_bucket, action) \u003e 0",
"legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}",
"refId": "E"
}
],
"timeFrom": "7d",
"title": "7d avg of job durations",
"transformations": [
{
"id": "reduce",
"options": {
"mode": "seriesToRows",
"reducers": [
"mean"
]
}
},
{
"id": "seriesToRows"
},
{
"id": "organize",
"options": {
"renameByName": {
"Field": "Type",
"Mean": "Avg Duration",
"Metric": "Legend",
"Value": "Duration"
}
}
}
],
"type": "table"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Histogram showing p99, p95, p50, and p10 percentiles for job processing duration based on number of resources changed",
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 22
},
"id": 15,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.99 - size {{resources_changed_bucket}}",
"refId": "B"
},
{
"expr": "histogram_quantile(0.95, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.95 - size {{resources_changed_bucket}}",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.5 - size {{resources_changed_bucket}}",
"refId": "D"
},
{
"expr": "histogram_quantile(0.1, sum(rate(app_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[5m])) by (le, resources_changed_bucket, action))",
"legendFormat": "{{action}} q0.1 - size {{resources_changed_bucket}}",
"refId": "E"
}
],
"title": "Job Duration",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "Total number of jobs waiting to be processed",
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 22
},
"id": 16,
"targets": [
{
"expr": "clamp_min(sum(app_operation_queue_size{cluster=\"$cluster\", namespace=\"default\"}), 0)",
"legendFormat": "Queue size",
"refId": "A"
}
],
"title": "Queue Size",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"fieldConfig": {
"defaults": {
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 22
},
"id": 17,
"targets": [
{
"expr": "avg(histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[7d])) by (le)))",
"legendFormat": "Queue wait",
"refId": "A"
}
],
"timeFrom": "7d",
"title": "7d avg Queue Wait Time",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"description": "How long a job is in the queue before being picked up",
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 29
},
"id": 18,
"targets": [
{
"expr": "histogram_quantile(0.99, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.99",
"refId": "B"
},
{
"expr": "histogram_quantile(0.95, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.95",
"refId": "C"
},
{
"expr": "histogram_quantile(0.5, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.5",
"refId": "D"
},
{
"expr": "histogram_quantile(0.1, sum(rate(app_operation_queue_wait_seconds_bucket{cluster=\"$cluster\", namespace=\"default\"}[$__rate_interval])) by (le))",
"legendFormat": "q0.1",
"refId": "E"
}
],
"title": "Queue Wait Time",
"type": "timeseries"
},
{
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 29
},
"id": 19,
"options": {
"content": "Resource utilization monitoring for application containers",
"mode": "markdown"
},
"title": "Resource Monitoring",
"type": "text"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 16,
"y": 29
},
"id": 20,
"targets": [
{
"expr": "count by (cluster, channel)(label_replace(label_replace(kube_pod_container_info{namespace=\"default\", container=\"app-worker\", pod=~\"app-worker.*\", cluster=~\"$cluster\"}, \"version\", \"$1\", \"image\", \".+:(.+)\"), \"channel\", \"$1\", \"container\", \".+-(.+)\"))",
"legendFormat": "{{cluster}}",
"refId": "A"
}
],
"title": "Running Pod(s)",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 0,
"y": 36
},
"id": 21,
"targets": [
{
"expr": "max(kube_pod_container_resource_requests{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})",
"legendFormat": "Memory Request",
"refId": "A"
},
{
"expr": "max(kube_pod_container_resource_limits{namespace=\"default\", resource=\"memory\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"})",
"legendFormat": "Memory Limit",
"refId": "B"
},
{
"expr": "max(container_memory_usage_bytes{namespace=\"default\",cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker.*\"}) by (pod)",
"legendFormat": "Container usage {{pod}}",
"refId": "C"
}
],
"title": "Memory Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"gridPos": {
"h": 7,
"w": 8,
"x": 8,
"y": 36
},
"id": 22,
"targets": [
{
"expr": "sum(irate(container_cpu_usage_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container, cpu)",
"legendFormat": "Usage {{pod}}",
"refId": "A"
},
{
"expr": "sum(irate(container_cpu_cfs_throttled_seconds_total{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\"}[$__rate_interval])) by (pod, container)",
"legendFormat": "Throttling {{pod}}",
"refId": "B"
},
{
"expr": "max(kube_pod_container_resource_limits{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})",
"legendFormat": "CPU limit",
"refId": "C"
},
{
"expr": "max(kube_pod_container_resource_requests{namespace=\"default\", cluster=~\"$cluster\", container=\"app-worker\", pod=~\"app-worker-.*\", resource=\"cpu\"})",
"legendFormat": "CPU request",
"refId": "D"
}
],
"title": "CPU Utilization",
"type": "timeseries"
}
],
"refresh": "10s",
"schemaVersion": 16,
"tags": [
"as-code"
],
"templating": {
"list": [
{
"current": {
"value": "prometheus-datasource"
},
"hide": 0,
"label": "Data source",
"name": "datasource",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"value": "prometheus-datasource"
},
"name": "prom",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"value": "loki-datasource"
},
"name": "loki",
"options": [],
"query": "loki",
"refresh": 1,
"regex": "",
"type": "datasource"
},
{
"current": {
"text": "tempo-datasource",
"value": "tempo-datasource"
},
"name": "tempo",
"options": [],
"query": "tempo",
"refresh": 1,
"regex": ".*tempo.*",
"type": "datasource"
},
{
"current": {
"text": "demo-cluster",
"value": "demo-cluster"
},
"datasource": {
"type": "prometheus",
"uid": "${prom}"
},
"name": "cluster",
"options": [],
"query": "label_values(app_worker_threads_active,cluster)",
"refresh": 1,
"type": "query"
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "utc",
"title": "Span Zero Demo Dashboard",
"uid": "span-zero-demo-dashboard",
"weekStart": ""
}

View File

@ -5,6 +5,7 @@ go 1.24.6
require ( require (
github.com/grafana/grafana-app-sdk v0.46.0 github.com/grafana/grafana-app-sdk v0.46.0
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250804150913-990f1c69ecc2 github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250804150913-990f1c69ecc2
github.com/stretchr/testify v1.11.1
k8s.io/apimachinery v0.34.1 k8s.io/apimachinery v0.34.1
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
) )
@ -42,7 +43,6 @@ require (
github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.11.1 // indirect
github.com/woodsbury/decimal128 v1.3.0 // indirect github.com/woodsbury/decimal128 v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect

View File

@ -0,0 +1,80 @@
package v1alpha1
import (
"slices"
"strings"
)
// Add records a star for name under the (group, kind) resource,
// keeping that resource's name list sorted and duplicate-free.
// A resource entry is created on demand when none matches.
func (stars *StarsSpec) Add(group, kind, name string) {
	for i := range stars.Resource {
		res := &stars.Resource[i]
		if res.Group != group || res.Kind != kind {
			continue
		}
		names := append(res.Names, name)
		slices.Sort(names)
		res.Names = slices.Compact(names) // drop the duplicate if name was already starred
		return
	}

	// No matching resource entry yet; append one and restore global ordering.
	stars.Resource = append(stars.Resource, StarsResource{
		Group: group,
		Kind:  kind,
		Names: []string{name},
	})
	stars.Normalize()
}
// Remove deletes the star for name under the (group, kind) resource.
// It is a no-op when the resource or the name is not present; when the
// last name of a resource is removed, the empty entry is dropped.
func (stars *StarsSpec) Remove(group, kind, name string) {
	for i := range stars.Resource {
		res := &stars.Resource[i]
		if res.Group != group || res.Kind != kind {
			continue
		}
		idx := slices.Index(res.Names, name)
		if idx < 0 {
			return // name was not starred
		}
		res.Names = slices.Delete(res.Names, idx, idx+1)
		if len(res.Names) == 0 {
			// Normalize prunes the now-empty resource entry.
			stars.Normalize()
		}
		return
	}
}
// Normalize puts the spec into canonical form: every resource's names are
// sorted and de-duplicated, resources with no names are dropped, the
// resources themselves are ordered by Group+Kind, and an empty list
// collapses to nil.
func (stars *StarsSpec) Normalize() {
	var kept []StarsResource
	for _, res := range stars.Resource {
		if len(res.Names) == 0 {
			continue // prune empty entries
		}
		slices.Sort(res.Names)
		res.Names = slices.Compact(res.Names) // removes any duplicates
		kept = append(kept, res)
	}
	slices.SortFunc(kept, func(a, b StarsResource) int {
		return strings.Compare(a.Group+a.Kind, b.Group+b.Kind)
	})
	stars.Resource = kept // nil when nothing was kept
}
// Changes diffs two name lists and reports which names were added (present
// only in target), removed (present only in current), and unchanged
// (present in both). Each result slice is nil when its category is empty.
//
// The original implementation collected removals by ranging over a map,
// which made the order of multiple removed names nondeterministic; removals
// are now emitted in the order the names first appear in current.
func Changes(current []string, target []string) (added []string, removed []string, same []string) {
	// Track current names that have not yet been matched by target.
	lookup := make(map[string]bool, len(current))
	for _, k := range current {
		lookup[k] = true
	}
	for _, k := range target {
		if lookup[k] {
			same = append(same, k)
			delete(lookup, k) // matched; whatever remains was removed
		} else {
			added = append(added, k)
		}
	}
	// Emit each removed name once, preserving its order in current.
	for _, k := range current {
		if lookup[k] {
			removed = append(removed, k)
			delete(lookup, k)
		}
	}
	return
}

View File

@ -0,0 +1,235 @@
package v1alpha1
import (
"testing"
"github.com/stretchr/testify/require"
)
// starItem is the (group, kind, name) triple handed to StarsSpec.Add or
// StarsSpec.Remove by the table-driven cases in this file.
type starItem struct {
	group string // resource API group
	kind  string // resource kind within the group
	name  string // starred object name
}
// TestStarsWrite covers the StarsSpec mutation helpers (Add/Remove) and the
// Changes diff helper with table-driven cases.
func TestStarsWrite(t *testing.T) {
	// "apply" mutates spec in place via Add or Remove and compares it with
	// expect. A nil expect means the operation must leave spec unchanged.
	t.Run("apply", func(t *testing.T) {
		tests := []struct {
			name   string
			spec   *StarsSpec // starting spec, mutated in place
			item   starItem   // the (group, kind, name) to apply
			remove bool       // true => Remove, false => Add
			expect *StarsSpec // expected result; nil => unchanged
		}{{
			name: "add to an existing array",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a", "b", "x"},
				}},
			},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "c",
			},
			remove: false,
			expect: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a", "b", "c", "x"}, // added "c" (and kept sorted)
				}},
			},
		}, {
			name: "remove from an existing array",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a", "b", "c"},
				}},
			},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "b",
			},
			remove: true,
			expect: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a", "c"}, // removed "b"
				}},
			},
		}, {
			name: "add to empty spec",
			spec: &StarsSpec{},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "a",
			},
			remove: false,
			expect: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a"},
				}},
			},
		}, {
			// expect is nil: removing a missing name must not change the spec.
			name: "remove item that does not exist",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"x"},
				}},
			},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "a",
			},
			remove: true,
		}, {
			// expect is nil: adding a duplicate name must not change the spec.
			name: "add item that already exist",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"x"},
				}},
			},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "x",
			},
			remove: false,
		}, {
			// Removing from an empty spec is a no-op.
			name: "remove from empty",
			spec: &StarsSpec{},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "a",
			},
			remove: true,
		}, {
			// Same no-op scenario, but with a longer existing name list.
			name: "remove item that does not exist",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a", "b", "c"},
				}},
			},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "X",
			},
			remove: true,
		}, {
			// Removing the only name should drop the whole resource entry.
			name: "remove last item",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a"},
				}},
			},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "a",
			},
			remove: true,
			expect: &StarsSpec{}, // empty object
		}, {
			// Only the emptied resource is pruned; the sibling entry survives.
			name: "remove last item (with others)",
			spec: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g",
					Kind:  "k",
					Names: []string{"a"},
				}, {
					Group: "g2",
					Kind:  "k2",
					Names: []string{"a"},
				}}},
			item: starItem{
				group: "g",
				kind:  "k",
				name:  "a",
			},
			remove: true,
			expect: &StarsSpec{
				Resource: []StarsResource{{
					Group: "g2",
					Kind:  "k2",
					Names: []string{"a"},
				}}},
		}}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				if tt.expect == nil {
					// No expect given: the operation must be a no-op, so the
					// pre-mutation copy of spec is the expected value.
					tt.expect = tt.spec.DeepCopy()
				}
				if tt.remove {
					tt.spec.Remove(tt.item.group, tt.item.kind, tt.item.name)
				} else {
					tt.spec.Add(tt.item.group, tt.item.kind, tt.item.name)
				}
				require.Equal(t, tt.expect, tt.spec)
			})
		}
	})

	// "changes" verifies the three-way diff produced by Changes; unset
	// expectation slices are nil, matching the function's empty result.
	t.Run("changes", func(t *testing.T) {
		tests := []struct {
			name    string
			current []string
			target  []string
			added   []string
			removed []string
			same    []string
		}{{
			name:    "same",
			current: []string{"a"},
			target:  []string{"a"},
			same:    []string{"a"},
		}, {
			name:    "adding one",
			current: []string{"a"},
			target:  []string{"a", "b"},
			same:    []string{"a"},
			added:   []string{"b"},
		}, {
			name:    "removing one",
			current: []string{"a", "b"},
			target:  []string{"a"},
			same:    []string{"a"},
			removed: []string{"b"},
		}, {
			name:    "removed to empty",
			current: []string{"a"},
			target:  []string{},
			removed: []string{"a"},
		}}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				a, r, s := Changes(tt.current, tt.target)
				require.Equal(t, tt.added, a, "added")
				require.Equal(t, tt.removed, r, "removed")
				require.Equal(t, tt.same, s, "same")
			})
		}
	})
}

42
apps/scope/go.mod Normal file
View File

@ -0,0 +1,42 @@
module github.com/grafana/grafana/apps/scope
go 1.24.6
require (
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20251007081214-26e147d01f0a
k8s.io/apimachinery v0.34.1
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
)
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.11.1 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/net v0.44.0 // indirect
golang.org/x/text v0.29.0 // indirect
google.golang.org/protobuf v1.36.9 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)

118
apps/scope/go.sum Normal file
View File

@ -0,0 +1,118 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20251007081214-26e147d01f0a h1:L7xgV9mP6MRF3L2/vDOjNR7heaBPbXPMGTDN9/jXSFQ=
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20251007081214-26e147d01f0a/go.mod h1:OK8NwS87D5YphchOcAsiIWk/feMZ0EzfAGME1Kff860=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

View File

@ -0,0 +1,6 @@
// Package v0alpha1 contains the initial (v0alpha1) API types for the
// scope.grafana.app group: Scope, ScopeDashboardBinding, ScopeNode and
// ScopeNavigation, plus their list and "find" result wrappers.
//
// NOTE(review): import path corrected to live under the module path
// github.com/grafana/grafana/apps/scope (see go.mod) — confirm the on-disk
// directory is apps/scope/pkg/apis/scope/v0alpha1.
//
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
// +groupName=scope.grafana.app
package v0alpha1 // import "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1"

View File

@ -0,0 +1,168 @@
package v0alpha1
import (
"fmt"
"time"
"github.com/grafana/grafana/pkg/apimachinery/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Identifiers for the scope.grafana.app API group and its v0alpha1 version.
const (
	GROUP      = "scope.grafana.app" // API group name
	VERSION    = "v0alpha1"          // API version within the group
	APIVERSION = GROUP + "/" + VERSION // fully-qualified apiVersion string ("scope.grafana.app/v0alpha1")
)
// ScopeResourceInfo describes the Scope kind: its group/version, resource
// names, object constructors, and how to render a Scope as a table row.
var ScopeResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopes", "scope", "Scope",
	func() runtime.Object { return &Scope{} },
	func() runtime.Object { return &ScopeList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "Title", Type: "string"},
			{Name: "Filters", Type: "array"},
		},
		// Reader maps one *Scope to a row of cells, in column order.
		Reader: func(obj any) ([]any, error) {
			m, ok := obj.(*Scope)
			if !ok {
				// Include the actual type so misrouted objects are easy to diagnose.
				return nil, fmt.Errorf("expected *Scope, got %T", obj)
			}
			return []any{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.Title,
				m.Spec.Filters,
			}, nil
		},
	}, // default table converter
)
// ScopeDashboardBindingResourceInfo describes the ScopeDashboardBinding kind:
// its group/version, resource names, object constructors, and table rendering.
var ScopeDashboardBindingResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopedashboardbindings", "scopedashboardbinding", "ScopeDashboardBinding",
	func() runtime.Object { return &ScopeDashboardBinding{} },
	func() runtime.Object { return &ScopeDashboardBindingList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "Dashboard", Type: "string"},
			{Name: "Scope", Type: "string"},
		},
		// Reader maps one *ScopeDashboardBinding to a row of cells, in column order.
		Reader: func(obj any) ([]any, error) {
			m, ok := obj.(*ScopeDashboardBinding)
			if !ok {
				// Include the actual type so misrouted objects are easy to diagnose.
				return nil, fmt.Errorf("expected *ScopeDashboardBinding, got %T", obj)
			}
			return []any{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.Dashboard,
				m.Spec.Scope,
			}, nil
		},
	},
)
// ScopeNavigationResourceInfo describes the ScopeNavigation kind: its
// group/version, resource names, object constructors, and table rendering.
var ScopeNavigationResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopenavigations", "scopenavigation", "ScopeNavigation",
	func() runtime.Object { return &ScopeNavigation{} },
	func() runtime.Object { return &ScopeNavigationList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "URL", Type: "string"},
			{Name: "Scope", Type: "string"},
		},
		// Reader maps one *ScopeNavigation to a row of cells, in column order.
		Reader: func(obj any) ([]any, error) {
			m, ok := obj.(*ScopeNavigation)
			if !ok {
				// Include the actual type so misrouted objects are easy to diagnose.
				return nil, fmt.Errorf("expected *ScopeNavigation, got %T", obj)
			}
			return []any{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.URL,
				m.Spec.Scope,
			}, nil
		},
	},
)
// ScopeNodeResourceInfo describes the ScopeNode kind: its group/version,
// resource names, object constructors, and table rendering.
var ScopeNodeResourceInfo = utils.NewResourceInfo(GROUP, VERSION,
	"scopenodes", "scopenode", "ScopeNode",
	func() runtime.Object { return &ScopeNode{} },
	func() runtime.Object { return &ScopeNodeList{} },
	utils.TableColumns{
		Definition: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Created At", Type: "date"},
			{Name: "Title", Type: "string"},
			{Name: "Parent Name", Type: "string"},
			{Name: "Node Type", Type: "string"},
			{Name: "Link Type", Type: "string"},
			{Name: "Link ID", Type: "string"},
		},
		// Reader maps one *ScopeNode to a row of cells, in column order.
		Reader: func(obj any) ([]any, error) {
			m, ok := obj.(*ScopeNode)
			if !ok {
				// Include the actual type so misrouted objects are easy to diagnose.
				return nil, fmt.Errorf("expected *ScopeNode, got %T", obj)
			}
			return []any{
				m.Name,
				m.CreationTimestamp.UTC().Format(time.RFC3339),
				m.Spec.Title,
				m.Spec.ParentName,
				m.Spec.NodeType,
				m.Spec.LinkType,
				m.Spec.LinkID,
			}, nil
		},
	}, // default table converter
)
var (
	// SchemeGroupVersion is the group version used to register these objects.
	SchemeGroupVersion = schema.GroupVersion{Group: GROUP, Version: VERSION}
	// InternalGroupVersion is the unversioned (internal) form of the group.
	InternalGroupVersion = schema.GroupVersion{Group: GROUP, Version: runtime.APIVersionInternal}

	// SchemeBuilder is used by standard codegen to collect registration functions.
	SchemeBuilder runtime.SchemeBuilder
	// localSchemeBuilder lets init() append registration functions to SchemeBuilder.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all collected registration functions to a runtime.Scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)
// init wires AddKnownTypes into the scheme builder so that AddToScheme
// registers every type in this package under SchemeGroupVersion.
func init() {
	localSchemeBuilder.Register(func(s *runtime.Scheme) error {
		return AddKnownTypes(SchemeGroupVersion, s)
	})
}
// AddKnownTypes adds the list of known types to the given scheme under gv.
// The "find" result types are registered too so they can be served as
// subresource responses.
func AddKnownTypes(gv schema.GroupVersion, scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(gv,
		&Scope{},
		&ScopeList{},
		&ScopeDashboardBinding{},
		&ScopeDashboardBindingList{},
		&ScopeNode{},
		&ScopeNodeList{},
		&FindScopeNodeChildrenResults{},
		&FindScopeDashboardBindingsResults{},
		&ScopeNavigation{},
		&ScopeNavigationList{},
		&FindScopeNavigationsResults{},
	)
	// NOTE(review): metav1.AddToGroupVersion is deliberately left disabled —
	// confirm that ListOptions and friends are registered elsewhere.
	//metav1.AddToGroupVersion(scheme, gv)
	return nil
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
	gvr := SchemeGroupVersion.WithResource(resource)
	return gvr.GroupResource()
}

View File

@ -0,0 +1,238 @@
package v0alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
/*
Please keep pkg/promlib/models/query.go and pkg/promlib/models/scope.go in sync
with this file until this package is out of the grafana/grafana module.
*/
// Scope is a named collection of filters that can be applied together.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Scope struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ScopeSpec `json:"spec,omitempty"`
}

// ScopeSpec is the user-visible definition of a Scope.
type ScopeSpec struct {
	// Title is the display name of the scope.
	Title string `json:"title"`

	// Provides a default path for the scope. This refers to a list of nodes in the selector. This is used to display the title next to the selected scope and expand the selector to the proper path.
	// This will override whichever is selected from in the selector.
	// The path is a list of node ids, starting at the direct parent of the selected node towards the root.
	// +listType=atomic
	DefaultPath []string `json:"defaultPath,omitempty"`

	// Filters are the predicates this scope applies.
	// +listType=atomic
	Filters []ScopeFilter `json:"filters,omitempty"`
}

// ScopeFilter is a single key/operator/value predicate within a Scope.
type ScopeFilter struct {
	Key   string `json:"key"`
	Value string `json:"value"`
	// Values is used for operators that require multiple values (e.g. one-of and not-one-of).
	// +listType=atomic
	Values   []string       `json:"values,omitempty"`
	Operator FilterOperator `json:"operator"`
}

// FilterOperator is the comparison operator used by a ScopeFilter.
// +enum
type FilterOperator string

// Defines values for FilterOperator.
const (
	FilterOperatorEquals        FilterOperator = "equals"
	FilterOperatorNotEquals     FilterOperator = "not-equals"
	FilterOperatorRegexMatch    FilterOperator = "regex-match"
	FilterOperatorRegexNotMatch FilterOperator = "regex-not-match"
	FilterOperatorOneOf         FilterOperator = "one-of"
	FilterOperatorNotOneOf      FilterOperator = "not-one-of"
)
// ScopeList is a list of Scope objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []Scope `json:"items,omitempty"`
}

// ScopeDashboardBinding associates a dashboard with a scope; both are
// referenced by name in its spec.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeDashboardBinding struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ScopeDashboardBindingSpec   `json:"spec,omitempty"`
	Status ScopeDashboardBindingStatus `json:"status,omitempty"`
}

// ScopeDashboardBindingList is a list of ScopeDashboardBinding objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeDashboardBindingList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeDashboardBinding `json:"items,omitempty"`
}

// FindScopeDashboardBindingsResults is a non-persisted payload carrying the
// bindings that matched a query plus an optional message.
// NOTE(review): presumably returned by a "find" subresource — confirm against
// the API server wiring.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FindScopeDashboardBindingsResults struct {
	metav1.TypeMeta `json:",inline"`

	Items   []ScopeDashboardBinding `json:"items,omitempty"`
	Message string                  `json:"message,omitempty"`
}

// ScopeNode is a single node in the scope selector tree (see ScopeNodeSpec).
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNode struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec ScopeNodeSpec `json:"spec,omitempty"`
}

// ScopeDashboardBindingSpec identifies the bound dashboard and scope by name.
type ScopeDashboardBindingSpec struct {
	Dashboard string `json:"dashboard"`
	Scope     string `json:"scope"`
}
// ScopeDashboardBindingStatus contains derived information about a ScopeDashboardBinding.
type ScopeDashboardBindingStatus struct {
	// DashboardTitle should be populated and updated from the dashboard.
	DashboardTitle string `json:"dashboardTitle"`

	// Groups is used for the grouping of dashboards that are suggested based
	// on a scope. The source of truth for this information has not been
	// determined yet.
	Groups []string `json:"groups,omitempty"`

	// DashboardTitleConditions is a list of conditions that are used to determine if the dashboard title is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	DashboardTitleConditions []metav1.Condition `json:"dashboardTitleConditions,omitempty"`

	// GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	GroupsConditions []metav1.Condition `json:"groupsConditions,omitempty"`
}

// NodeType says whether a ScopeNode is a container of other nodes or a leaf.
// +enum
type NodeType string

// Defines values for NodeType.
const (
	NodeTypeContainer NodeType = "container"
	NodeTypeLeaf      NodeType = "leaf"
)

// LinkType is the type of object a leaf ScopeNode links to.
// +enum
type LinkType string

// Defines values for LinkType.
const (
	LinkTypeScope LinkType = "scope"
)

// ScopeNodeSpec describes one node in the scope selector tree.
type ScopeNodeSpec struct {
	// ParentName is the name of the parent node; empty for root nodes.
	//+optional
	ParentName string `json:"parentName,omitempty"`

	NodeType NodeType `json:"nodeType"` // container | leaf

	Title              string `json:"title"`
	Description        string `json:"description,omitempty"`
	DisableMultiSelect bool   `json:"disableMultiSelect"`

	LinkType LinkType `json:"linkType,omitempty"` // scope (later more things)
	LinkID   string   `json:"linkId,omitempty"`   // the k8s name
	// ?? should this be a slice of links
}
// ScopeNodeList is a list of ScopeNode objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNodeList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeNode `json:"items,omitempty"`
}

// FindScopeNodeChildrenResults is a non-persisted payload carrying the child
// nodes that matched a query.
// NOTE(review): presumably returned by a "find" subresource — confirm against
// the API server wiring.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FindScopeNodeChildrenResults struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeNode `json:"items,omitempty"`
}

// Scoped navigation types

// ScopeNavigation associates a URL with a scope; both are referenced in its spec.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNavigation struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ScopeNavigationSpec   `json:"spec,omitempty"`
	Status ScopeNavigationStatus `json:"status,omitempty"`
}

// ScopeNavigationList is a list of ScopeNavigation objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ScopeNavigationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []ScopeNavigation `json:"items,omitempty"`
}

// FindScopeNavigationsResults is a non-persisted payload carrying the
// navigations that matched a query plus an optional message.
// NOTE(review): presumably returned by a "find" subresource — confirm against
// the API server wiring.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type FindScopeNavigationsResults struct {
	metav1.TypeMeta `json:",inline"`

	Items   []ScopeNavigation `json:"items,omitempty"`
	Message string            `json:"message,omitempty"`
}

// ScopeNavigationSpec identifies the navigation URL and the scope it belongs to.
type ScopeNavigationSpec struct {
	URL   string `json:"url"`
	Scope string `json:"scope"`
}

// ScopeNavigationStatus contains derived information about a ScopeNavigation.
type ScopeNavigationStatus struct {
	// Title should be populated and updated from the dashboard.
	Title string `json:"title"`

	// Groups is used for the grouping of dashboards that are suggested based
	// on a scope. The source of truth for this information has not been
	// determined yet.
	Groups []string `json:"groups,omitempty"`

	// TitleConditions is a list of conditions that are used to determine if the title is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	TitleConditions []metav1.Condition `json:"titleConditions,omitempty"`

	// GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.
	// +optional
	// +listType=map
	// +listMapKey=type
	GroupsConditions []metav1.Condition `json:"groupsConditions,omitempty"`
}

// ScopeNavigationLinkType is the type of link a ScopeNavigation points at.
// +enum
type ScopeNavigationLinkType string

// Defines values for ScopeNavigationLinkType.
const (
	ScopeNavigationLinkTypeURL ScopeNavigationLinkType = "url"
)

View File

@ -0,0 +1,519 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: AGPL-3.0-only
// Code generated by deepcopy-gen. DO NOT EDIT.
package v0alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): the functions below are generated by deepcopy-gen — do not
// hand-edit; regenerate with the project's codegen tooling instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FindScopeDashboardBindingsResults) DeepCopyInto(out *FindScopeDashboardBindingsResults) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ScopeDashboardBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindScopeDashboardBindingsResults.
func (in *FindScopeDashboardBindingsResults) DeepCopy() *FindScopeDashboardBindingsResults {
	if in == nil {
		return nil
	}
	out := new(FindScopeDashboardBindingsResults)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FindScopeDashboardBindingsResults) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FindScopeNavigationsResults) DeepCopyInto(out *FindScopeNavigationsResults) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ScopeNavigation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindScopeNavigationsResults.
func (in *FindScopeNavigationsResults) DeepCopy() *FindScopeNavigationsResults {
	if in == nil {
		return nil
	}
	out := new(FindScopeNavigationsResults)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FindScopeNavigationsResults) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FindScopeNodeChildrenResults) DeepCopyInto(out *FindScopeNodeChildrenResults) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ScopeNode, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FindScopeNodeChildrenResults.
func (in *FindScopeNodeChildrenResults) DeepCopy() *FindScopeNodeChildrenResults {
	if in == nil {
		return nil
	}
	out := new(FindScopeNodeChildrenResults)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FindScopeNodeChildrenResults) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Scope) DeepCopyInto(out *Scope) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scope.
func (in *Scope) DeepCopy() *Scope {
	if in == nil {
		return nil
	}
	out := new(Scope)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Scope) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDashboardBinding) DeepCopyInto(out *ScopeDashboardBinding) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBinding.
func (in *ScopeDashboardBinding) DeepCopy() *ScopeDashboardBinding {
	if in == nil {
		return nil
	}
	out := new(ScopeDashboardBinding)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeDashboardBinding) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// NOTE(review): generated by deepcopy-gen — do not hand-edit; regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDashboardBindingList) DeepCopyInto(out *ScopeDashboardBindingList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ScopeDashboardBinding, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingList.
func (in *ScopeDashboardBindingList) DeepCopy() *ScopeDashboardBindingList {
	if in == nil {
		return nil
	}
	out := new(ScopeDashboardBindingList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeDashboardBindingList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDashboardBindingSpec) DeepCopyInto(out *ScopeDashboardBindingSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingSpec.
func (in *ScopeDashboardBindingSpec) DeepCopy() *ScopeDashboardBindingSpec {
	if in == nil {
		return nil
	}
	out := new(ScopeDashboardBindingSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeDashboardBindingStatus) DeepCopyInto(out *ScopeDashboardBindingStatus) {
	*out = *in
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.DashboardTitleConditions != nil {
		in, out := &in.DashboardTitleConditions, &out.DashboardTitleConditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.GroupsConditions != nil {
		in, out := &in.GroupsConditions, &out.GroupsConditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDashboardBindingStatus.
func (in *ScopeDashboardBindingStatus) DeepCopy() *ScopeDashboardBindingStatus {
	if in == nil {
		return nil
	}
	out := new(ScopeDashboardBindingStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeFilter) DeepCopyInto(out *ScopeFilter) {
	*out = *in
	// Values is re-allocated so the copy does not alias the source slice.
	if in.Values != nil {
		in, out := &in.Values, &out.Values
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeFilter.
func (in *ScopeFilter) DeepCopy() *ScopeFilter {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeFilter)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeList) DeepCopyInto(out *ScopeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are re-allocated and copied element-wise so the two lists never
	// share element storage.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Scope, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeList.
func (in *ScopeList) DeepCopy() *ScopeList {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeList) DeepCopyObject() runtime.Object {
	// Return an untyped nil (not a typed nil pointer) for a nil receiver so the
	// returned runtime.Object interface value compares equal to nil.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNavigation) DeepCopyInto(out *ScopeNavigation) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// Spec needs no per-field copying; Status owns slices and is deep-copied.
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigation.
func (in *ScopeNavigation) DeepCopy() *ScopeNavigation {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNavigation)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeNavigation) DeepCopyObject() runtime.Object {
	// Return an untyped nil (not a typed nil pointer) for a nil receiver so the
	// returned runtime.Object interface value compares equal to nil.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNavigationList) DeepCopyInto(out *ScopeNavigationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are re-allocated and copied element-wise so the two lists never
	// share element storage.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ScopeNavigation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigationList.
func (in *ScopeNavigationList) DeepCopy() *ScopeNavigationList {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNavigationList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeNavigationList) DeepCopyObject() runtime.Object {
	// Return an untyped nil (not a typed nil pointer) for a nil receiver so the
	// returned runtime.Object interface value compares equal to nil.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNavigationSpec) DeepCopyInto(out *ScopeNavigationSpec) {
	// The generator emitted no per-field copies, so a plain value assignment
	// is already a full deep copy of this spec.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigationSpec.
func (in *ScopeNavigationSpec) DeepCopy() *ScopeNavigationSpec {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNavigationSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNavigationStatus) DeepCopyInto(out *ScopeNavigationStatus) {
	*out = *in
	// Slice fields are re-allocated so in and out never share backing arrays.
	if in.Groups != nil {
		in, out := &in.Groups, &out.Groups
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.TitleConditions != nil {
		in, out := &in.TitleConditions, &out.TitleConditions
		*out = make([]v1.Condition, len(*in))
		// Each Condition is copied via its own DeepCopyInto rather than assignment.
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.GroupsConditions != nil {
		in, out := &in.GroupsConditions, &out.GroupsConditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNavigationStatus.
func (in *ScopeNavigationStatus) DeepCopy() *ScopeNavigationStatus {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNavigationStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNode) DeepCopyInto(out *ScopeNode) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// The generator copies Spec by assignment, so it needs no per-field copying.
	out.Spec = in.Spec
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNode.
func (in *ScopeNode) DeepCopy() *ScopeNode {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNode)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeNode) DeepCopyObject() runtime.Object {
	// Return an untyped nil (not a typed nil pointer) for a nil receiver so the
	// returned runtime.Object interface value compares equal to nil.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNodeList) DeepCopyInto(out *ScopeNodeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items are re-allocated and copied element-wise so the two lists never
	// share element storage.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ScopeNode, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNodeList.
func (in *ScopeNodeList) DeepCopy() *ScopeNodeList {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNodeList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScopeNodeList) DeepCopyObject() runtime.Object {
	// Return an untyped nil (not a typed nil pointer) for a nil receiver so the
	// returned runtime.Object interface value compares equal to nil.
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeNodeSpec) DeepCopyInto(out *ScopeNodeSpec) {
	// The generator emitted no per-field copies, so a plain value assignment
	// is already a full deep copy of this spec.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeNodeSpec.
func (in *ScopeNodeSpec) DeepCopy() *ScopeNodeSpec {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeNodeSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScopeSpec) DeepCopyInto(out *ScopeSpec) {
	*out = *in
	// Slice fields are re-allocated so in and out never share backing arrays.
	if in.DefaultPath != nil {
		in, out := &in.DefaultPath, &out.DefaultPath
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Filters != nil {
		in, out := &in.Filters, &out.Filters
		*out = make([]ScopeFilter, len(*in))
		// Each ScopeFilter owns a Values slice, so it is copied via DeepCopyInto.
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSpec.
func (in *ScopeSpec) DeepCopy() *ScopeSpec {
	// A nil receiver yields a nil copy, so callers may chain without a nil check.
	if in == nil {
		return nil
	}
	out := new(ScopeSpec)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,19 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: AGPL-3.0-only
// Code generated by defaulter-gen. DO NOT EDIT.
package v0alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	// No defaulters were generated for this API group, so there is nothing to register.
	return nil
}

View File

@ -0,0 +1,934 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// SPDX-License-Identifier: AGPL-3.0-only
// Code generated by openapi-gen. DO NOT EDIT.
package v0alpha1
import (
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions maps the fully-qualified names of this package's API
// types to their OpenAPI definitions; cross-type references are resolved
// through the supplied ref callback.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.FindScopeDashboardBindingsResults": schema_pkg_apis_scope_v0alpha1_FindScopeDashboardBindingsResults(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.FindScopeNavigationsResults":       schema_pkg_apis_scope_v0alpha1_FindScopeNavigationsResults(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.FindScopeNodeChildrenResults":      schema_pkg_apis_scope_v0alpha1_FindScopeNodeChildrenResults(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.Scope":                             schema_pkg_apis_scope_v0alpha1_Scope(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding":             schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingList":         schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingList(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec":         schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus":       schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeFilter":                       schema_pkg_apis_scope_v0alpha1_ScopeFilter(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeList":                         schema_pkg_apis_scope_v0alpha1_ScopeList(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation":                   schema_pkg_apis_scope_v0alpha1_ScopeNavigation(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationList":               schema_pkg_apis_scope_v0alpha1_ScopeNavigationList(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationSpec":               schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationStatus":             schema_pkg_apis_scope_v0alpha1_ScopeNavigationStatus(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode":                         schema_pkg_apis_scope_v0alpha1_ScopeNode(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeList":                     schema_pkg_apis_scope_v0alpha1_ScopeNodeList(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeSpec":                     schema_pkg_apis_scope_v0alpha1_ScopeNodeSpec(ref),
		"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeSpec":                         schema_pkg_apis_scope_v0alpha1_ScopeSpec(ref),
	}
}
// schema_pkg_apis_scope_v0alpha1_FindScopeDashboardBindingsResults builds the
// OpenAPI definition for FindScopeDashboardBindingsResults: kind/apiVersion,
// a list of ScopeDashboardBinding items, and an informational message.
func schema_pkg_apis_scope_v0alpha1_FindScopeDashboardBindingsResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding"),
									},
								},
							},
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding"},
	}
}
// schema_pkg_apis_scope_v0alpha1_FindScopeNavigationsResults builds the
// OpenAPI definition for FindScopeNavigationsResults: kind/apiVersion,
// a list of ScopeNavigation items, and an informational message.
func schema_pkg_apis_scope_v0alpha1_FindScopeNavigationsResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation"),
									},
								},
							},
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation"},
	}
}
// schema_pkg_apis_scope_v0alpha1_FindScopeNodeChildrenResults builds the
// OpenAPI definition for FindScopeNodeChildrenResults: kind/apiVersion,
// list metadata, and a list of ScopeNode items.
func schema_pkg_apis_scope_v0alpha1_FindScopeNodeChildrenResults(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_Scope builds the OpenAPI definition for the
// Scope resource: kind/apiVersion, object metadata, and its ScopeSpec.
func schema_pkg_apis_scope_v0alpha1_Scope(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeSpec"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding builds the OpenAPI
// definition for the ScopeDashboardBinding resource: kind/apiVersion, object
// metadata, spec, and status.
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBinding(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingSpec", "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBindingStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingList builds the OpenAPI
// definition for ScopeDashboardBindingList: kind/apiVersion, list metadata,
// and the list of ScopeDashboardBinding items.
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeDashboardBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec builds the OpenAPI
// definition for ScopeDashboardBindingSpec: the required dashboard and scope
// string fields.
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"dashboard": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"scope": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"dashboard", "scope"},
			},
		},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus builds the
// OpenAPI definition for ScopeDashboardBindingStatus: the required dashboard
// title plus derived groups and their condition lists.
//
// NOTE(review): the groupsConditions description previously read
// "DashboardTitleConditions is a list of conditions ..." — a copy-paste of the
// sibling field's text. It is corrected here to match the wording used by
// ScopeNavigationStatus.groupsConditions; the authoritative fix belongs in the
// GroupsConditions field comment on the ScopeDashboardBindingStatus Go type,
// from which openapi-gen derives this string.
func schema_pkg_apis_scope_v0alpha1_ScopeDashboardBindingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Type of the item. ScopeDashboardBindingStatus contains derived information about a ScopeDashboardBinding.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"dashboardTitle": {
						SchemaProps: spec.SchemaProps{
							Description: "DashboardTitle should be populated and update from the dashboard",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"groups": {
						SchemaProps: spec.SchemaProps{
							Description: "Groups is used for the grouping of dashboards that are suggested based on a scope. The source of truth for this information has not been determined yet.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"dashboardTitleConditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-map-keys": []interface{}{
									"type",
								},
								"x-kubernetes-list-type": "map",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "DashboardTitleConditions is a list of conditions that are used to determine if the dashboard title is valid.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
									},
								},
							},
						},
					},
					"groupsConditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-map-keys": []interface{}{
									"type",
								},
								"x-kubernetes-list-type": "map",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
									},
								},
							},
						},
					},
				},
				Required: []string{"dashboardTitle"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Condition"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeFilter builds the OpenAPI definition
// for ScopeFilter: required key, value, and operator (enumerated), plus an
// optional atomic list of values for multi-value operators.
func schema_pkg_apis_scope_v0alpha1_ScopeFilter(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"key": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"value": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"values": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Values is used for operators that require multiple values (e.g. one-of and not-one-of).",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"operator": {
						SchemaProps: spec.SchemaProps{
							Description: "Possible enum values:\n - `\"equals\"`\n - `\"not-equals\"`\n - `\"not-one-of\"`\n - `\"one-of\"`\n - `\"regex-match\"`\n - `\"regex-not-match\"`",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
							Enum:        []interface{}{"equals", "not-equals", "not-one-of", "one-of", "regex-match", "regex-not-match"},
						},
					},
				},
				Required: []string{"key", "value", "operator"},
			},
		},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeList builds the OpenAPI definition for
// ScopeList: kind/apiVersion, list metadata, and the list of Scope items.
func schema_pkg_apis_scope_v0alpha1_ScopeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.Scope"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.Scope", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNavigation builds the OpenAPI
// definition for the ScopeNavigation resource: kind/apiVersion, object
// metadata, spec, and status.
func schema_pkg_apis_scope_v0alpha1_ScopeNavigation(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationSpec", "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigationStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNavigationList builds the OpenAPI
// definition for ScopeNavigationList: kind/apiVersion, list metadata, and the
// list of ScopeNavigation items.
func schema_pkg_apis_scope_v0alpha1_ScopeNavigationList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNavigation", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec builds the OpenAPI
// definition for ScopeNavigationSpec: the required url and scope string
// fields.
func schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"url": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"scope": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"url", "scope"},
			},
		},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNavigationStatus builds the OpenAPI
// definition for ScopeNavigationStatus: the required title plus derived
// groups and their condition lists.
func schema_pkg_apis_scope_v0alpha1_ScopeNavigationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Type of the item. ScopeNavigationStatus contains derived information about a ScopeNavigation.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"title": {
						SchemaProps: spec.SchemaProps{
							Description: "Title should be populated and update from the dashboard",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"groups": {
						SchemaProps: spec.SchemaProps{
							Description: "Groups is used for the grouping of dashboards that are suggested based on a scope. The source of truth for this information has not been determined yet.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"titleConditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-map-keys": []interface{}{
									"type",
								},
								"x-kubernetes-list-type": "map",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "TitleConditions is a list of conditions that are used to determine if the title is valid.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
									},
								},
							},
						},
					},
					"groupsConditions": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-map-keys": []interface{}{
									"type",
								},
								"x-kubernetes-list-type": "map",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "GroupsConditions is a list of conditions that are used to determine if the list of groups is valid.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
									},
								},
							},
						},
					},
				},
				Required: []string{"title"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Condition"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNode returns the OpenAPI definition for
// the ScopeNode resource: the standard Kubernetes kind/apiVersion/metadata
// object fields plus a reference to the ScopeNodeSpec schema.
func schema_pkg_apis_scope_v0alpha1_ScopeNode(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Standard Kubernetes top-level object properties.
	properties := map[string]spec.Schema{
		"kind": {
			SchemaProps: spec.SchemaProps{
				Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"apiVersion": {
			SchemaProps: spec.SchemaProps{
				Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"metadata": {
			SchemaProps: spec.SchemaProps{
				Default: map[string]interface{}{},
				Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
			},
		},
		"spec": {
			SchemaProps: spec.SchemaProps{
				Default: map[string]interface{}{},
				Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeSpec"),
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type:       []string{"object"},
				Properties: properties,
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNodeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNodeList returns the OpenAPI definition
// for the list type wrapping ScopeNode: standard kind/apiVersion/ListMeta
// fields plus an items array of ScopeNode references.
func schema_pkg_apis_scope_v0alpha1_ScopeNodeList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Each list entry is a full ScopeNode object.
	itemSchema := &spec.SchemaOrArray{
		Schema: &spec.Schema{
			SchemaProps: spec.SchemaProps{
				Default: map[string]interface{}{},
				Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode"),
			},
		},
	}
	properties := map[string]spec.Schema{
		"kind": {
			SchemaProps: spec.SchemaProps{
				Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"apiVersion": {
			SchemaProps: spec.SchemaProps{
				Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"metadata": {
			SchemaProps: spec.SchemaProps{
				Default: map[string]interface{}{},
				Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
			},
		},
		"items": {
			SchemaProps: spec.SchemaProps{
				Type:  []string{"array"},
				Items: itemSchema,
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type:       []string{"object"},
				Properties: properties,
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeNode", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeNodeSpec returns the OpenAPI definition
// for ScopeNodeSpec. All properties are scalars; nodeType, title and
// disableMultiSelect are required. The ref callback is unused because the
// schema contains no references to other definitions.
func schema_pkg_apis_scope_v0alpha1_ScopeNodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	stringProp := func(desc, def string, enum ...interface{}) spec.Schema {
		s := spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Type:        []string{"string"},
				Format:      "",
			},
		}
		if def != "" || enum == nil {
			// Generated schemas distinguish "no default" (nil) from the
			// empty-string default; optional fields pass def == "" with enum
			// set to skip the default entirely.
		}
		s.Enum = enum
		return s
	}
	_ = stringProp // helper kept simple; properties below are written explicitly for clarity

	properties := map[string]spec.Schema{
		"parentName": {
			SchemaProps: spec.SchemaProps{
				Type:   []string{"string"},
				Format: "",
			},
		},
		"nodeType": {
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		},
		"title": {
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		},
		"description": {
			SchemaProps: spec.SchemaProps{
				Type:   []string{"string"},
				Format: "",
			},
		},
		"disableMultiSelect": {
			SchemaProps: spec.SchemaProps{
				Default: false,
				Type:    []string{"boolean"},
				Format:  "",
			},
		},
		"linkType": {
			SchemaProps: spec.SchemaProps{
				Description: "Possible enum values:\n - `\"scope\"`",
				Type:        []string{"string"},
				Format:      "",
				Enum:        []interface{}{"scope"},
			},
		},
		"linkId": {
			SchemaProps: spec.SchemaProps{
				Description: "scope (later more things)",
				Type:        []string{"string"},
				Format:      "",
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type:       []string{"object"},
				Properties: properties,
				Required:   []string{"nodeType", "title", "disableMultiSelect"},
			},
		},
	}
}
// schema_pkg_apis_scope_v0alpha1_ScopeSpec returns the OpenAPI definition for
// ScopeSpec: a required title, an atomic defaultPath string list, and an
// atomic filters list referencing ScopeFilter.
func schema_pkg_apis_scope_v0alpha1_ScopeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Both list properties are marked x-kubernetes-list-type: atomic, meaning
	// they are replaced wholesale on update rather than merged element-wise.
	atomicList := spec.VendorExtensible{
		Extensions: spec.Extensions{
			"x-kubernetes-list-type": "atomic",
		},
	}
	properties := map[string]spec.Schema{
		"title": {
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		},
		"defaultPath": {
			VendorExtensible: atomicList,
			SchemaProps: spec.SchemaProps{
				Description: "Provides a default path for the scope. This refers to a list of nodes in the selector. This is used to display the title next to the selected scope and expand the selector to the proper path. This will override whichever is selected from in the selector. The path is a list of node ids, starting at the direct parent of the selected node towards the root.",
				Type:        []string{"array"},
				Items: &spec.SchemaOrArray{
					Schema: &spec.Schema{
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
			},
		},
		"filters": {
			VendorExtensible: atomicList,
			SchemaProps: spec.SchemaProps{
				Type: []string{"array"},
				Items: &spec.SchemaOrArray{
					Schema: &spec.Schema{
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeFilter"),
						},
					},
				},
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type:       []string{"object"},
				Properties: properties,
				Required:   []string{"title"},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1.ScopeFilter"},
	}
}

View File

@ -0,0 +1,10 @@
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,FindScopeDashboardBindingsResults,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,FindScopeNavigationsResults,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeDashboardBindingStatus,Groups
API rule violation: list_type_missing,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNavigationStatus,Groups
API rule violation: names_match,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNodeSpec,LinkID
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,FindScopeNodeChildrenResults,Items
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeDashboardBindingList,Items
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeList,Items
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNavigationList,Items
API rule violation: streaming_list_type_json_tags,github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1,ScopeNodeList,Items

View File

@ -14,7 +14,30 @@ Once your feature toggle is defined, you can then wrap your feature around a che
Examples: Examples:
- [Backend](https://github.com/grafana/grafana/blob/feb2b5878b3e3ec551d64872c35edec2a0187812/pkg/services/authn/clients/session.go#L57): Use the `IsEnabled` function and pass in your feature toggle. - [Backend](https://github.com/grafana/grafana/blob/feb2b5878b3e3ec551d64872c35edec2a0187812/pkg/services/authn/clients/session.go#L57): Use the `IsEnabled` function and pass in your feature toggle.
- [Frontend](https://github.com/grafana/grafana/blob/feb2b5878b3e3ec551d64872c35edec2a0187812/public/app/features/search/service/folders.ts#L14): Check the config for your feature toggle.
### Frontend
Use the new OpenFeature-based feature flag client for all new feature flags. There are some differences compared to the legacy `config.featureToggles` system:
- Feature flag initialisation is async, but will be finished by the time the UI is rendered. This means you cannot get the value of a feature flag at the 'top level' of a module/file
- Call `evaluateBooleanFlag("flagName")` from `@grafana/runtime/internal` instead to get the value of a feature flag
- Feature flag values _may_ change over the lifetime of the session. Do not store the value in a variable that is used for longer than a single render - always call `evaluateBooleanFlag` lazily when you use the value.
e.g.
```ts
import { evaluateBooleanFlag } from '@grafana/runtime/internal';
// BAD - Don't do this. The feature toggle will not evaluate correctly
const isEnabled = evaluateBooleanFlag('newPreferences', false);
function makeAPICall() {
// GOOD - The feature toggle should be called after app initialisation
if (evaluateBooleanFlag('newPreferences', false)) {
// do new things
}
}
```
## Enabling toggles in development ## Enabling toggles in development

View File

@ -174,7 +174,7 @@ resource "grafana_role" "my_new_role" {
description = "My test role" description = "My test role"
version = 1 version = 1
uid = "newroleuid" uid = "newroleuid"
global = true global = false
permissions { permissions {
action = "org.users:add" action = "org.users:add"

View File

@ -156,7 +156,7 @@ On this screen, you can see:
- The earliest time a user has been active in a dashboard - The earliest time a user has been active in a dashboard
- When they last accessed a shared dashboard - When they last accessed a shared dashboard
- The dashboards to they have access - The dashboards they have access to
- Their role - Their role
You can also revoke a user's access to all shared dashboards from this tab. You can also revoke a user's access to all shared dashboards from this tab.

View File

@ -0,0 +1,347 @@
---
aliases:
- ../data-sources/prometheus/
- ../features/datasources/prometheus/
description: Guide for authenticating with Amazon Managed Service for Prometheus in Grafana
keywords:
- grafana
- prometheus
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Authenticating with SigV4
title: Configure the Prometheus data source
weight: 200
refs:
intro-to-prometheus:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
exemplars:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
configure-data-links-value-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
alerting-alert-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/
add-a-data-source:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
prom-query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
default-manage-alerts-ui-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
provision-grafana:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
manage-alerts-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
manage-recording-rules-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
private-data-source-connect:
- pattern: /docs/grafana/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
- pattern: /docs/grafana-cloud/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
configure-pdc:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
azure-active-directory:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
configure-grafana-configuration-file-location:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
grafana-managed-recording-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
---
# Connect to Amazon Managed Service for Prometheus
1. In the data source configuration page, locate the **Auth** section
2. Enable **SigV4 auth**
3. Configure the following settings:
| Setting | Description | Example |
| --------------------------- | ---------------------------------------------- | --------------------------------------------------------------- |
| **Authentication Provider** | Choose your auth method | `AWS SDK Default`, `Access & secret key`, or `Credentials file` |
| **Default Region** | AWS region for your workspace | `us-west-2` |
| **Access Key ID** | Your AWS access key (if using access key auth) | `AKIA...` |
| **Secret Access Key** | Your AWS secret key (if using access key auth) | `wJalrXUtn...` |
| **Assume Role ARN** | IAM role ARN (optional) | `arn:aws:iam::123456789:role/GrafanaRole` |
4. Set the **HTTP URL** to your Amazon Managed Service for Prometheus workspace endpoint: `https://aps-workspaces.us-west-2.amazonaws.com/workspaces/ws-12345678-1234-1234-1234-123456789012/`
5. Click **Save & test** to verify the connection
## Example configuration
```yaml
# Example provisioning configuration
apiVersion: 1
datasources:
- name: 'Amazon Managed Prometheus'
type: 'grafana-amazonprometheus-datasource'
url: 'https://aps-workspaces.us-west-2.amazonaws.com/workspaces/ws-12345678-1234-1234-1234-123456789012/'
jsonData:
httpMethod: 'POST'
sigV4Auth: true
sigV4AuthType: 'keys'
sigV4Region: 'us-east-2'
secureJsonData:
sigV4AccessKey: '<access key>'
sigV4SecretKey: '<secret key>'
```
## Migrate to Amazon Managed Service for Prometheus
Learn more about why this is happening: [Prometheus data source update: Redefining our big tent philosophy](https://grafana.com/blog/2025/06/16/prometheus-data-source-update-redefining-our-big-tent-philosophy/)
Before you begin, ensure you have the organization administrator role. If you are self-hosting Grafana, back up your existing dashboard configurations and queries.
Grafana Cloud users will be automatically migrated to the relevant version of Prometheus, so no action needs to be taken.
For air-gapped environments, download and install [Amazon Managed Service for Prometheus](https://grafana.com/grafana/plugins/grafana-amazonprometheus-datasource/), then follow the standard migration process.
### Migrate
1. Enable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
2. Restart Grafana for the changes to take effect.
{{< admonition type="note" >}}
This feature toggle will be removed in Grafana 13, and the migration will be automatic.
{{< /admonition >}}
### Check migration status
To determine if your Prometheus data sources have been migrated:
1. Navigate to **Connections** > **Data sources**
2. Select your Prometheus data source
3. Look for a migration banner at the top of the configuration page
The banner displays one of the following messages:
- **"Migration Notice"** - The data source has already been migrated
- **"Deprecation Notice"** - The data source has not been migrated
- **No banner** - No migration is needed for this data source
## Common migration issues
The following sections contain troubleshooting guidance.
**Migration banner not appearing**
- Verify the `prometheusTypeMigration` feature toggle is enabled
- Restart Grafana after enabling the feature toggle
**Amazon Managed Service for Prometheus is not installed**
- Verify that Amazon Managed Service for Prometheus is installed by going to **Connections** > **Add new connection** and search for "Amazon Managed Service for Prometheus"
- Install Amazon Managed Service for Prometheus if not already installed
**After migrating, my data source returns "401 Unauthorized"**
- If you are using self-hosted Grafana, check your .ini file to confirm that `grafana-amazonprometheus-datasource` is included in `forward_settings_to_plugins` under the `[aws]` heading.
- If you are using Grafana Cloud, contact Grafana support.
### Rollback self-hosted Grafana without a backup
If you don't have a backup of your Grafana instance before the migration, remove the `prometheusTypeMigration` feature toggle, and run the following script. It reverts all Amazon Managed Service for Prometheus data sources back to core Prometheus.
To revert the migration:
1. Disable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
2. Obtain a bearer token that has `read` and `write` permissions for your Grafana data source API. For more information on the data source API, refer to [Data source API](/docs/grafana/<GRAFANA_VERSION>/developers/http_api/data_source/).
3. Run the script below. Make sure to provide your Grafana URL and bearer token.
4. (Optional) Report the issue you were experiencing on the [Grafana repository](https://github.com/grafana/grafana/issues). Tag the issue with "datasource/migrate-prometheus-type".
```bash
#!/bin/bash
# Configuration
GRAFANA_URL=""
BEARER_TOKEN=""
LOG_FILE="grafana_migration_$(date +%Y%m%d_%H%M%S).log"
# Function to log messages to both console and file
log_message() {
local message="$1"
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo "[$timestamp] $message" | tee -a "$LOG_FILE"
}
# Function to update a data source
update_data_source() {
local uid="$1"
local data="$2"
response=$(curl -s -w "\n%{http_code}" -X PUT \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $BEARER_TOKEN" \
-d "$data" \
"$GRAFANA_URL/api/datasources/uid/$uid")
http_code=$(echo "$response" | tail -n1)
response_body=$(echo "$response" | sed '$d')
if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
log_message "$uid successful"
else
log_message "$uid error: HTTP $http_code - $response_body"
fi
}
# Function to process and update data source types
update_data_source_type() {
local result="$1"
local processed_count=0
local updated_count=0
local readonly_count=0
local skipped_count=0
# Use jq to parse and process JSON
echo "$result" | jq -c '.[]' | while read -r data; do
uid=$(echo "$data" | jq -r '.uid')
prometheus_type_migration=$(echo "$data" | jq -r '.jsonData["prometheus-type-migration"] // false')
data_type=$(echo "$data" | jq -r '.type')
read_only=$(echo "$data" | jq -r '.readOnly // false')
processed_count=$((processed_count + 1))
# Check conditions
if [[ "$prometheus_type_migration" != "true" ]] || [[ "$data_type" != "grafana-amazonprometheus-datasource" ]]; then
skipped_count=$((skipped_count + 1))
continue
fi
if [[ "$read_only" == "true" ]]; then
readonly_count=$((readonly_count + 1))
log_message "$uid is readOnly. If this data source is provisioned, edit the data source type to be \`prometheus\` in the provisioning file."
continue
fi
# Update the data
updated_data=$(echo "$data" | jq '.type = "prometheus" | .jsonData["prometheus-type-migration"] = false')
update_data_source "$uid" "$updated_data"
updated_count=$((updated_count + 1))
# Log the raw data for debugging (optional - uncomment if needed)
# log_message "DEBUG - Updated data for $uid: $updated_data"
done
# Note: These counts won't work in the while loop due to subshell
# Moving summary to the main function instead
}
# Function to get summary statistics
get_summary_stats() {
local result="$1"
local total_datasources=$(echo "$result" | jq '. | length')
local migration_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-amazonprometheus-datasource")] | length')
local readonly_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-amazonprometheus-datasource" and .readOnly == true)] | length')
local updateable_candidates=$(echo "$result" | jq '[.[] | select(.jsonData["prometheus-type-migration"] == true and .type == "grafana-amazonprometheus-datasource" and (.readOnly == false or .readOnly == null))] | length')
log_message "=== MIGRATION SUMMARY ==="
log_message "Total data sources found: $total_datasources"
log_message "Migration candidates found: $migration_candidates"
log_message "Read-only candidates (will be skipped): $readonly_candidates"
log_message "Updateable candidates: $updateable_candidates"
log_message "=========================="
}
# Main function to remove Prometheus type migration
remove_prometheus_type_migration() {
log_message "Starting remove Azure Prometheus migration"
log_message "Log file: $LOG_FILE"
log_message "Grafana URL: $GRAFANA_URL"
response=$(curl -s -w "\n%{http_code}" -X GET \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $BEARER_TOKEN" \
"$GRAFANA_URL/api/datasources/")
http_code=$(echo "$response" | tail -n1)
response_body=$(echo "$response" | sed '$d')
if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
log_message "Successfully fetched data sources"
get_summary_stats "$response_body"
update_data_source_type "$response_body"
log_message "Migration process completed"
else
log_message "error fetching data sources: HTTP $http_code - $response_body"
fi
}
# Function to initialize log file
initialize_log() {
echo "=== Grafana Azure Prometheus Migration Log ===" > "$LOG_FILE"
echo "Started at: $(date)" >> "$LOG_FILE"
echo "=============================================" >> "$LOG_FILE"
echo "" >> "$LOG_FILE"
}
# Check if jq is installed
if ! command -v jq &> /dev/null; then
echo "Error: jq is required but not installed. Please install jq to run this script."
exit 1
fi
# Check if required variables are set
if [[ -z "$GRAFANA_URL" || -z "$BEARER_TOKEN" ]]; then
echo "Error: Please set GRAFANA_URL and BEARER_TOKEN variables at the top of the script."
exit 1
fi
# Initialize log file
initialize_log
# Execute main function
log_message "Script started"
remove_prometheus_type_migration
log_message "Script completed"
# Final log message
echo ""
echo "Migration completed. Full log available at: $LOG_FILE"
```
If you continue to experience issues, check the Grafana server logs for detailed error messages and contact [Grafana Support](https://grafana.com/help/) with your troubleshooting results.

View File

@ -0,0 +1,359 @@
---
aliases:
- ../data-sources/prometheus/
- ../features/datasources/prometheus/
description: Guide for authenticating with Azure Monitor Managed Service for Prometheus in Grafana
keywords:
- grafana
- prometheus
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Authenticating with Azure
title: Configure the Prometheus data source
weight: 200
refs:
intro-to-prometheus:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/intro-to-prometheus/
exemplars:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/fundamentals/exemplars/
configure-data-links-value-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/configure-data-links/#value-variables
alerting-alert-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/
add-a-data-source:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/#add-a-data-source
prom-query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/prometheus/query-editor
default-manage-alerts-ui-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
provision-grafana:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
manage-alerts-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_manage_alerts_ui_toggle
manage-recording-rules-toggle:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#default_allow_recording_rules_target_alerts_ui_toggle
private-data-source-connect:
- pattern: /docs/grafana/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
- pattern: /docs/grafana-cloud/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
configure-pdc:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
azure-active-directory:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/#configure-azure-active-directory-ad-authentication
configure-grafana-configuration-file-location:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#configuration-file-location
grafana-managed-recording-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
---
# Connect to Azure Monitor Managed Service for Prometheus
After creating an Azure Monitor Managed Service for Prometheus data source:
1. In the data source configuration page, locate the **Authentication** section
2. Select your authentication method:
- **Managed Identity**: For Azure-hosted Grafana instances. To learn more about Entra login for Grafana, refer to [Configure Azure AD/Entra ID OAuth authentication](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#configure-azure-adentra-id-oauth-authentication)
- **App Registration**: For service principal authentication
- **Current User**: Uses the current user's Azure AD credentials
3. Configure based on your chosen method:
| Setting | Description | Example |
| --------------------------- | ------------------------------- | -------------------------------------- |
| **Directory (tenant) ID** | Your Azure AD tenant ID | `12345678-1234-1234-1234-123456789012` |
| **Application (client) ID** | Your app registration client ID | `87654321-4321-4321-4321-210987654321` |
| **Client secret** | Your app registration secret | `your-client-secret` |
When using Managed Identity for authentication:
- No additional configuration required if using system-assigned identity.
- For user-assigned identity, provide the **Client ID**.
4. Set the **Prometheus server URL** to your Azure Monitor workspace endpoint:
```
https://your-workspace.eastus2.prometheus.monitor.azure.com
```
5. Click **Save & test** to verify the connection
## Example configuration
```yaml
# Example provisioning configuration for App Registration
apiVersion: 1
datasources:
- name: 'Azure Monitor Prometheus'
type: 'grafana-azureprometheus-datasource'
url: 'https://your-workspace.eastus2.prometheus.monitor.azure.com'
jsonData:
azureCredentials:
authType: 'clientsecret'
azureCloud: 'AzureCloud'
clientId: '<client_id>'
httpMethod: 'POST'
tenantId: '<tenant_id>'
secureJsonData:
clientSecret: 'your-client-secret'
```
## Migrate to Azure Monitor Managed Service for Prometheus
Learn more about why this is happening: [Prometheus data source update: Redefining our big tent philosophy](https://grafana.com/blog/2025/06/16/prometheus-data-source-update-redefining-our-big-tent-philosophy/)
Before you begin, ensure you have the organization administrator role. If you are self-hosting Grafana, back up your existing dashboard configurations and queries.
Grafana Cloud users will be automatically migrated to the relevant version of Prometheus, so no action needs to be taken.
For air-gapped environments, download and install [Azure Monitor Managed Service for Prometheus](https://grafana.com/grafana/plugins/grafana-azureprometheus-datasource/), then follow the standard migration process.
### Migrate
1. Enable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
2. Restart Grafana for the changes to take effect.
{{< admonition type="note" >}}
This feature toggle will be removed in Grafana 13, and the migration will be automatic.
{{< /admonition >}}
To determine if your Prometheus data sources have been migrated:
1. Navigate to **Connections** > **Data sources**
2. Select your Prometheus data source
3. Look for a migration banner at the top of the configuration page
The banner displays one of the following messages:
- **"Migration Notice"** - The data source has already been migrated
- **"Deprecation Notice"** - The data source has not been migrated
- **No banner** - No migration is needed for this data source
## Common migration issues
The following sections contain troubleshooting guidance.
**Migration banner not appearing**
- Verify the `prometheusTypeMigration` feature toggle is enabled.
- Restart Grafana after enabling the feature toggle
**Azure Monitor Managed Service for Prometheus is not installed**
- Verify that Azure Monitor Managed Service for Prometheus is installed by going to **Connections** > **Add new connection** and searching for "Azure Monitor Managed Service for Prometheus"
- Install Azure Monitor Managed Service for Prometheus if not already installed
**After migrating, my data source returns "401 Unauthorized"**
- If you are using self-hosted Grafana, check your .ini for `grafana-azureprometheus-datasource` is included in `forward_settings_to_plugins` under the `[azure]` heading.
- If you are using Grafana Cloud, contact Grafana support.
### Rollback self-hosted Grafana without a backup
If you don't have a backup of your Grafana instance before the migration, remove the `prometheusTypeMigration` feature toggle, and run the following script. It reverts all the Azure Monitor Managed Service data source instances back to core Prometheus.
To revert the migration:
1. Disable the `prometheusTypeMigration` feature toggle. For more information on feature toggles, refer to [Manage feature toggles](/docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/#manage-feature-toggles).
2. Obtain a bearer token that has `read` and `write` permissions for your Grafana data source API. For more information on the data source API, refer to [Data source API](/docs/grafana/<GRAFANA_VERSION>/developers/http_api/data_source/).
3. Run the script below. Make sure to provide your Grafana URL and bearer token.
4. (Optional) Report the issue you were experiencing on the [Grafana repository](https://github.com/grafana/grafana/issues). Tag the issue with "datasource/migrate-prometheus-type"
```bash
#!/bin/bash
# Configuration
# Base URL of the Grafana instance, e.g. https://grafana.example.com (no trailing slash needed).
GRAFANA_URL=""
# Bearer token with read and write permissions on the data source API.
BEARER_TOKEN=""
# Timestamped log file created in the current working directory.
LOG_FILE="grafana_migration_$(date +%Y%m%d_%H%M%S).log"
# Function to log messages to both console and file
# Arguments:
#   $1 - message text to log
log_message() {
local message="$1"
local timestamp
# Declare and assign separately so `local` doesn't mask the
# command substitution's exit status (ShellCheck SC2155).
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo "[$timestamp] $message" | tee -a "$LOG_FILE"
}
# Function to update a data source
# Issues a PUT against the Grafana data source API and logs the outcome.
# Arguments:
#   $1 - data source UID
#   $2 - full JSON payload to send as the request body
update_data_source() {
local uid="$1"
local data="$2"
# Declare intermediates local so they don't clobber callers' variables.
local response http_code response_body
response=$(curl -s -w "\n%{http_code}" -X PUT \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $BEARER_TOKEN" \
-d "$data" \
"$GRAFANA_URL/api/datasources/uid/$uid")
# curl -w appends the HTTP status code on its own line; split it off the body.
http_code=$(echo "$response" | tail -n1)
response_body=$(echo "$response" | sed '$d')
if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
log_message "$uid successful"
else
log_message "$uid error: HTTP $http_code - $response_body"
fi
}
# Function to process and update data source types
# Reverts each migrated Azure Prometheus data source back to core Prometheus.
# Arguments:
#   $1 - JSON array of data sources as returned by GET /api/datasources/
# Skips entries not flagged by the migration and read-only (provisioned)
# entries; everything else is PUT back with type "prometheus".
update_data_source_type() {
local result="$1"
local processed_count=0
local updated_count=0
local readonly_count=0
local skipped_count=0
local data uid prometheus_type_migration data_type read_only updated_data
# Feed the loop via process substitution instead of a pipeline: a piped
# `while read` runs in a subshell, which silently discards the counter
# updates made inside the loop body.
while read -r data; do
uid=$(echo "$data" | jq -r '.uid')
prometheus_type_migration=$(echo "$data" | jq -r '.jsonData["prometheus-type-migration"] // false')
data_type=$(echo "$data" | jq -r '.type')
read_only=$(echo "$data" | jq -r '.readOnly // false')
processed_count=$((processed_count + 1))
# Only touch data sources that were migrated and are of the Azure type.
if [[ "$prometheus_type_migration" != "true" ]] || [[ "$data_type" != "grafana-azureprometheus-datasource" ]]; then
skipped_count=$((skipped_count + 1))
continue
fi
# Provisioned (read-only) data sources cannot be updated via the API.
if [[ "$read_only" == "true" ]]; then
readonly_count=$((readonly_count + 1))
log_message "$uid is readOnly. If this data source is provisioned, edit the data source type to be \`prometheus\` in the provisioning file."
continue
fi
# Revert the type, clear the migration flag, and push the update.
updated_data=$(echo "$data" | jq '.type = "prometheus" | .jsonData["prometheus-type-migration"] = false')
update_data_source "$uid" "$updated_data"
updated_count=$((updated_count + 1))
# Log the raw data for debugging (optional - uncomment if needed)
# log_message "DEBUG - Updated data for $uid: $updated_data"
done < <(echo "$result" | jq -c '.[]')
log_message "Processed: $processed_count, updated: $updated_count, read-only skipped: $readonly_count, not candidates: $skipped_count"
}
# Log summary statistics about migration candidates before any updates run.
# Arguments:
#   $1 - JSON array of data sources as returned by GET /api/datasources/
get_summary_stats() {
local payload="$1"
# Shared jq predicate: a data source flagged by the migration and of the Azure type.
local candidate='.jsonData["prometheus-type-migration"] == true and .type == "grafana-azureprometheus-datasource"'
local total migratable readonly_candidates writable
total=$(echo "$payload" | jq '. | length')
migratable=$(echo "$payload" | jq "[.[] | select($candidate)] | length")
readonly_candidates=$(echo "$payload" | jq "[.[] | select($candidate and .readOnly == true)] | length")
writable=$(echo "$payload" | jq "[.[] | select($candidate and (.readOnly == false or .readOnly == null))] | length")
log_message "=== MIGRATION SUMMARY ==="
log_message "Total data sources found: $total"
log_message "Migration candidates found: $migratable"
log_message "Read-only candidates (will be skipped): $readonly_candidates"
log_message "Updateable candidates: $writable"
log_message "=========================="
}
# Main function to remove Prometheus type migration
# Fetches every data source from the Grafana API, logs summary statistics,
# and reverts eligible Azure Prometheus data sources back to core Prometheus.
remove_prometheus_type_migration() {
# Keep intermediates local so they don't leak into the global scope.
local response http_code response_body
log_message "Starting remove Azure Prometheus migration"
log_message "Log file: $LOG_FILE"
log_message "Grafana URL: $GRAFANA_URL"
response=$(curl -s -w "\n%{http_code}" -X GET \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $BEARER_TOKEN" \
"$GRAFANA_URL/api/datasources/")
# curl -w appends the HTTP status code on its own line; split it off the body.
http_code=$(echo "$response" | tail -n1)
response_body=$(echo "$response" | sed '$d')
if [[ "$http_code" -ge 200 && "$http_code" -lt 300 ]]; then
log_message "Successfully fetched data sources"
get_summary_stats "$response_body"
update_data_source_type "$response_body"
log_message "Migration process completed"
else
log_message "error fetching data sources: HTTP $http_code - $response_body"
fi
}
# Create (truncate) the log file and write its header banner.
initialize_log() {
{
echo "=== Grafana Azure Prometheus Migration Log ==="
echo "Started at: $(date)"
echo "============================================="
echo ""
} > "$LOG_FILE"
}
# Check if jq is installed
# The script relies on jq to parse every JSON response from the Grafana API.
if ! command -v jq &> /dev/null; then
echo "Error: jq is required but not installed. Please install jq to run this script."
exit 1
fi
# Check if required variables are set
# GRAFANA_URL and BEARER_TOKEN are defined (empty) at the top of the script
# and must be filled in before running.
if [[ -z "$GRAFANA_URL" || -z "$BEARER_TOKEN" ]]; then
echo "Error: Please set GRAFANA_URL and BEARER_TOKEN variables at the top of the script."
exit 1
fi
# Initialize log file
initialize_log
# Execute main function
log_message "Script started"
remove_prometheus_type_migration
log_message "Script completed"
# Final log message
echo ""
echo "Migration completed. Full log available at: $LOG_FILE"
```
If you continue to experience issues, check the Grafana server logs for detailed error messages and contact [Grafana Support](https://grafana.com/help/) with your troubleshooting results.

View File

@ -34,7 +34,7 @@ After you localize the latency problem to a few exemplar traces, you can combine
Support for exemplars is available for the Prometheus data source only. Support for exemplars is available for the Prometheus data source only.
After you enable the functionality, exemplar data is available by default. After you enable the functionality, exemplar data is available by default.
For more information on exemplar configuration and how to enable exemplars, refer to [configuring exemplars in the Prometheus data source](../../datasources/prometheus/configure-prometheus-data-source/#exemplars). For more information on exemplar configuration and how to enable exemplars, refer to the Exemplars section in [Prometheus configuration options](https://grafana.com/docs/grafana/latest/datasources/prometheus/configure/#configuration-options).
Grafana shows exemplars alongside a metric in the Explore view and in dashboards. Grafana shows exemplars alongside a metric in the Explore view and in dashboards.
Each exemplar displays as a highlighted star. Each exemplar displays as a highlighted star.

View File

@ -37,7 +37,7 @@ For an integrated, UI-driven Git workflow focused on dashboards, explore Git Syn
- Connect folders or entire Grafana instances directly to a GitHub repository to synchronize dashboard definitions, enabling version control, branching, and pull requests directly from Grafana. - Connect folders or entire Grafana instances directly to a GitHub repository to synchronize dashboard definitions, enabling version control, branching, and pull requests directly from Grafana.
- Git Sync offers a simple, out-of-the-box approach for managing dashboards as code. - Git Sync offers a simple, out-of-the-box approach for managing dashboards as code.
{{< admonition type="note" >}} {{< admonition type="note" >}}
Git Sync is an **experimental feature** in Grafana 12, available in Grafana OSS and Enterprise [nightly releases](https://grafana.com/grafana/download/nightly). It is not yet available in Grafana Cloud. Git Sync is available in **private preview** for Grafana Cloud, and it's an **experimental feature** in Grafana 12, available in Grafana OSS and Enterprise [nightly releases](https://grafana.com/grafana/download/nightly).
{{< /admonition >}} {{< /admonition >}}
Refer to the [Git Sync documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/) to learn more. Refer to the [Git Sync documentation](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/) to learn more.

View File

@ -18,16 +18,18 @@ weight: 300
# Provision resources and sync dashboards # Provision resources and sync dashboards
{{< admonition type="caution" >}} {{< admonition type="caution" >}}
Provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9). Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud. Support and documentation is available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided. You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
Git Sync and local file provisioning are [experimental features](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided.
{{< /admonition >}} {{< /admonition >}}
Provisioning is an experimental feature that allows you to configure how to store your dashboard JSONs and other files in GitHub repositories using either Git Sync or a local path. Provisioning allows you to configure how to store your dashboard JSON and other files in GitHub repositories using either Git Sync or a local path.
Of the two options, **Git Sync** is the favorited method for provisioning your dashboards. You can synchronize any new dashboards and changes to existing dashboards from the UI to your configured GitHub repository. If you push a change in the repository, those changes are mirrored in your Grafana instance. See [Git Sync workflow](#git-sync-workflow). Of the two options, **Git Sync** is the favorited method for provisioning your dashboards. You can synchronize any new dashboards and changes to existing dashboards from the UI to your configured GitHub repository. If you push a change in the repository, those changes are mirrored in your Grafana instance. Refer to [Git Sync workflow](#git-sync-workflow) for more information.
Alternatively, **local file provisioning** allows you to include in your Grafana instance resources (such as folders and dashboard JSON files) that are stored in a local file system. See [Local file workflow](local-file-workflow). Alternatively, **local file provisioning** allows you to include in your Grafana instance resources (such as folders and dashboard JSON files) that are stored in a local file system. Refer to [Local file workflow](#local-file-workflow) for more information.
## Provisioned folders and connections ## Provisioned folders and connections
@ -40,8 +42,7 @@ You can set a single folder, or multiple folders to a different repository, with
In the Git Sync workflow: In the Git Sync workflow:
- When you provision resources with Git Sync you can modify them from within the Grafana UI or within the GitHub repository. Changes made in either the repository or the Grafana UI are bidirectional. - When you provision resources with Git Sync you can modify them from within the Grafana UI or within the GitHub repository. Changes made in either the repository or the Grafana UI are bidirectional.
- Any changes made in the provisioned files stored in the GitHub repository are reflected in the Grafana database. By default, Grafana polls GitHub every 60 seconds. - Any changes made in the provisioned files stored in the GitHub repository are reflected in the Grafana database. By default, Grafana polls GitHub every 60 seconds. The Grafana UI reads from the database and updates the UI to reflect these changes.
- The Grafana UI reads from the database and updates the UI to reflect these changes.
For example, if you update a dashboard within the Grafana UI and click **Save** to preserve the changes, you'll be notified that the dashboard is provisioned in a GitHub repository. Next you'll be prompted to choose how to preserve the changes: either directly to a branch, or pushed to a new branch using a pull request in GitHub. For example, if you update a dashboard within the Grafana UI and click **Save** to preserve the changes, you'll be notified that the dashboard is provisioned in a GitHub repository. Next you'll be prompted to choose how to preserve the changes: either directly to a branch, or pushed to a new branch using a pull request in GitHub.
@ -52,8 +53,7 @@ For more information, see [Introduction to Git Sync](https://grafana.com/docs/gr
In the local file workflow: In the local file workflow:
- All provisioned resources are changed in the local files. - All provisioned resources are changed in the local files.
- Any changes made in the provisioned files are reflected in the Grafana database. - Any changes made in the provisioned files are reflected in the Grafana database. The Grafana UI reads the database and updates the UI to reflect these changes.
- The Grafana UI reads the database and updates the UI to reflect these changes.
- You can't use the Grafana UI to edit or delete provisioned resources. - You can't use the Grafana UI to edit or delete provisioned resources.
Learn more in [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/). Learn more in [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/).

View File

@ -16,9 +16,8 @@ weight: 200
# Set up file provisioning # Set up file provisioning
{{< admonition type="caution" >}} {{< admonition type="caution" >}}
Local file provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9). Local file provisioning is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions, but it's **not available in Grafana Cloud**. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided.
{{< /admonition >}} {{< /admonition >}}
@ -48,10 +47,14 @@ Refer to [Provision Grafana](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/
### Limitations ### Limitations
- A provisioned dashboard can't be deleted from within Grafana UI. The dashboard has to be deleted at the local file system and those changes synced to Grafana. - A provisioned dashboard can't be deleted from within Grafana UI. The dashboard has to be deleted at the local file system and those changes synced to Grafana.
- Changes from the local file system are one way: you can't save changes from the UI to GitHub. - Changes from the local file system are one way: you can't save changes from the Grafana UI to GitHub.
## Before you begin ## Before you begin
{{< admonition type="note" >}}
Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature.
{{< /admonition >}}
To set up file provisioning, you need: To set up file provisioning, you need:
- Administration rights in your Grafana organization. - Administration rights in your Grafana organization.
@ -122,15 +125,15 @@ The set up process verifies the path and provides an error message if a problem
### Choose what to synchronize ### Choose what to synchronize
In this section, you determine the actions taken with the storage you selected. Choose to either sync your entire organization resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
1. Select how resources should be handled in Grafana. - Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. With this option, all of your dashboards are synced to that one repository. You can only have one provisioned connection with this selection, and you won't have the option of setting up additional repositories to connect to.
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. You can only have one provisioned connection with this selection. - Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections.
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 folders. - Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
<!-- - Select **Migrate instance to repository** to migrate the Grafana instance to the repository. This option is not available during the first time you set up remote provisioning. -->
1. Select **Synchronize** to continue. Next, enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
Click **Synchronize** to continue.
### Synchronize with external storage ### Synchronize with external storage

View File

@ -16,50 +16,60 @@ weight: 100
# Set up Git Sync # Set up Git Sync
{{< admonition type="caution" >}} {{< admonition type="caution" >}}
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9). Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud, and is an [experimental feature](https://grafana.com/docs/release-life-cycle/) in Grafana v12 for open source and Enterprise editions.
Support and documentation is available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided.
You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
{{< /admonition >}} {{< /admonition >}}
Git Sync lets you manage Grafana dashboards as code by storing dashboards JSON files and folders in a remote GitHub repository. Git Sync lets you manage Grafana dashboards as code by storing dashboard JSON files and folders in a remote GitHub repository.
Alternatively, you can configure a local file system instead of using GitHub.
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for information.
This page explains how to use Git Sync with a GitHub repository. To set up Git Sync and synchronize with a GitHub repository follow these steps:
To set up Git Sync, you need to: 1. [Enable feature toggles in Grafana](#enable-required-feature-toggles) (first time set up).
1. [Create a GitHub access token](#create-a-github-access-token).
1. [Configure a connection to your GitHub repository](#set-up-the-connection-to-github).
1. [Choose what content to sync with Grafana](#choose-what-to-synchronize).
1. Enable feature toggles in Grafana (first time set up). Optionally, you can [extend Git Sync](#configure-webhooks-and-image-rendering) by enabling pull request notifications and image previews of dashboard changes.
1. Configure a connection to your GitHub repository.
1. Choose what content to sync with Grafana.
1. Optional: Extend Git Sync by enabling pull request notifications and image previews of dashboard changes.
| Capability | Benefit | Requires | | Capability | Benefit | Requires |
| ----------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------- | | ----------------------------------------------------- | ------------------------------------------------------------------------------- | -------------------------------------- |
| Adds a table summarizing changes to your pull request | Provides a convenient way to save changes back to GitHub. | Webhooks configured | | Adds a table summarizing changes to your pull request | Provides a convenient way to save changes back to GitHub. | Webhooks configured |
| Add a dashboard preview image to a PR | View a snapshot of dashboard changes to a pull request without opening Grafana. | Image renderer plugin and webhooks configured | | Add a dashboard preview image to a PR | View a snapshot of dashboard changes to a pull request without opening Grafana. | Image renderer and webhooks configured |
{{< admonition type="note" >}}
Alternatively, you can configure a local file system instead of using GitHub. Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for more information.
{{< /admonition >}}
## Performance impacts of enabling Git Sync ## Performance impacts of enabling Git Sync
Git Sync is an experimental feature and is under continuous development. Git Sync is an experimental feature and is under continuous development. Reporting any issues you encounter can help us improve Git Sync.
We recommend evaluating the performance impact, if any, in a non-production environment. When Git Sync is enabled, the database load might increase, especially for instances with a lot of folders and nested folders. Evaluate the performance impact, if any, in a non-production environment.
When Git Sync is enabled, the database load might increase, especially for instances with a lot of folders and nested folders.
Reporting any issues you encounter can help us improve Git Sync.
## Before you begin ## Before you begin
{{< admonition type="caution" >}}
Refer to [Known limitations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync#known-limitations/) before using Git Sync.
{{< /admonition >}}
To set up Git Sync, you need: To set up Git Sync, you need:
- Administration rights in your Grafana organization. - Administration rights in your Grafana organization.
- Enable the required feature toggles in your Grafana instance. Refer to [Enable required feature toggles](#enable-required-feature-toggles) for instructions. - Enable the required feature toggles in your Grafana instance. Refer to [Enable required feature toggles](#enable-required-feature-toggles) for instructions.
- A GitHub repository to store your dashboards in. - A GitHub repository to store your dashboards in.
- If you want to use a local file path, refer to [the local file path guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/). - If you want to use a local file path, refer to [the local file path guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/).
- A GitHub access token. The Grafana UI will also explain this to you as you set it up. - A GitHub access token. The Grafana UI will prompt you during setup.
- Optional: A public Grafana instance. - Optional: A public Grafana instance.
- Optional: Image Renderer plugin to save image previews with your PRs. - Optional: The [Image Renderer service](https://github.com/grafana/grafana-image-renderer) to save image previews with your PRs.
## Enable required feature toggles ## Enable required feature toggles
@ -118,28 +128,25 @@ To connect your GitHub repository, follow these steps:
### Choose what to synchronize ### Choose what to synchronize
You can choose to either use one repository for an entire organization or to a new Grafana folder (up to 10 connections). {{< admonition type="caution" >}}
If you choose to sync all resources with external storage, then all of your dashboards are synced to that one repository.
You won't have the option of setting up additional repositories to connect to.
You can choose to synchronize all resources with GitHub or you can sync resources to a new Grafana folder. If you're using Git Sync in Grafana Cloud, you can only sync specific folders for the moment. Git Sync will be available for your full instance soon.
The options you have depend on the status of your GitHub repository.
For example, if you are syncing with a new or empty repository, you won't have an option to migrate dashboards.
1. Select how resources should be handled in Grafana. {{< /admonition >}}
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. You can only have one provisioned connection with this selection. In this step you can decide which elements to synchronize. Keep in mind the available options depend on the status of your GitHub repository. The first time you connect Grafana with a GitHub repository, you need to synchronize with external storage. If you are syncing with a new or empty repository, you won't have an option to migrate dashboards.
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections. - Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
<!-- - Select **Migrate instance to repository** to migrate the Grafana instance to the repository. This option is not available during the first time you set up remote provisioning. -->
1. Select **Synchronize** to continue. 1. Choose to either sync your entire organization resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. With this option, all of your dashboards are synced to that one repository. You can only have one provisioned connection with this selection, and you won't have the option of setting up additional repositories to connect to.
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections.
1. Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
1. Click **Synchronize** to continue.
<!-- This is only relevant if we include the "Migrate instance to repository" option above. -->
<!-- ### Synchronize with external storage <!-- ### Synchronize with external storage
The first time you connect Grafana with a GitHub repository, you need to synchronize with external storage.
Future updates will be automatically saved to the repository and provisioned back to the instance.
{{< admonition type="note">}} {{< admonition type="note">}}
During the synchronization process, your dashboards will be temporarily unavailable. During the synchronization process, your dashboards will be temporarily unavailable.
No data or configuration will be lost. No data or configuration will be lost.
@ -157,15 +164,14 @@ Finally, you can set up how often your configured storage is polled for updates.
1. For **Update instance interval (seconds)**, enter how often you want the instance to pull updates from GitHub. The default value is 60 seconds. 1. For **Update instance interval (seconds)**, enter how often you want the instance to pull updates from GitHub. The default value is 60 seconds.
1. Optional: Select **Read only** to ensure resources can't be modified in Grafana. 1. Optional: Select **Read only** to ensure resources can't be modified in Grafana.
<!-- No workflow option listed in the UI. 1. For **Workflows**, select the GitHub workflows that you want to allow to run in the repository. Both **Branch** and **Write** are selected by default. --> <!-- No workflow option listed in the UI. 1. For **Workflows**, select the GitHub workflows that you want to allow to run in the repository. Both **Branch** and **Write** are selected by default. -->
1. Optional: If you have the Grafana Image Renderer plugin configured, you can **Enable dashboards previews in pull requests**. If image rendering is not available, then you can't select this option. For more information, refer to [Grafana Image Renderer](https://grafana.com/grafana/plugins/grafana-image-renderer/). 1. Optional: If you have the Grafana Image Renderer plugin configured, you can **Enable dashboards previews in pull requests**. If image rendering is not available, then you can't select this option. For more information, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
1. Select **Finish** to proceed. 1. Select **Finish** to proceed.
## Verify your dashboards in Grafana ## Verify your dashboards in Grafana
To verify that your dashboards are available at the location that you specified, click **Dashboards**. The name of the dashboard is listed in the **Name** column. To verify that your dashboards are available at the location that you specified, click **Dashboards**. The name of the dashboard is listed in the **Name** column.
Now that your dashboards have been synced from a repository, you can customize the name, change the branch, and create a pull request (PR) for it. Now that your dashboards have been synced from a repository, you can customize the name, change the branch, and create a pull request (PR) for it. Refer to [Manage provisioned repositories with Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/) for more information.
Refer to [Use Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/use-git-sync/) for more information.
## Configure webhooks and image rendering ## Configure webhooks and image rendering
@ -214,8 +220,7 @@ The necessary paths required to be exposed are (RegExp):
By setting up image rendering, you can add visual previews of dashboard updates directly in pull requests. By setting up image rendering, you can add visual previews of dashboard updates directly in pull requests.
Image rendering also requires webhooks. Image rendering also requires webhooks.
You can enable this capability by installing the Grafana Image Renderer plugin in your Grafana instance. You can enable this capability by installing the Grafana Image Renderer in your Grafana instance. For more information and installation instructions, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
For more information and installation instructions, refer to [Grafana Image Renderer](https://grafana.com/grafana/plugins/grafana-image-renderer/).
## Modify configurations after set up is complete ## Modify configurations after set up is complete

View File

@ -16,67 +16,86 @@ weight: 100
# Introduction to Git Sync # Introduction to Git Sync
{{< admonition type="caution" >}} {{< admonition type="caution" >}}
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9). Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud, and is an [experimental feature](https://grafana.com/docs/release-life-cycle/) in Grafana v12 for open source and Enterprise editions.
Support and documentation are available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided.
You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
{{< /admonition >}} {{< /admonition >}}
Git Sync in Grafana lets you manage your dashboards as code as JSON files stored in GitHub. You and your team can version control, collaborate, and automate deployments efficiently.
Using Git Sync, you can: Using Git Sync, you can:
- Manage dashboard configuration outside of Grafana instances using Git
- Introduce a review process for creating and modifying dashboards - Introduce a review process for creating and modifying dashboards
- Manage dashboard configuration outside of Grafana instances
- Replicate dashboards across multiple instances - Replicate dashboards across multiple instances
Whenever a dashboard is modified, Grafana can commit changes to Git upon saving. Users can configure settings to either enforce PR approvals before merging or allow direct commits.
Users can push changes directly to GitHub and see them in Grafana. Similarly, automated workflows can do changes that will be automatically represented in Grafana by updating Git.
Because the dashboards are defined in JSON files, you can enable as-code workflows where the JSON is output from Go, TypeScript, or another coding language in the format of a dashboard schema.
To learn more about creating dashboards in a coding language to provision them for Git Sync, refer to the [Foundation SDK](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/foundation-sdk) documentation.
## How it works ## How it works
Git Sync is bidirectional and also works with changes done directly in GitHub as well as within the Grafana UI. Git Sync is bidirectional and works with changes done directly in GitHub as well as in the Grafana UI.
Grafana periodically polls GitHub at a regular interval to synchronize any changes.
With the webhooks feature enabled, repository notifications appear almost immediately.
Without webhooks, Grafana polls for changes at the specified interval.
The default polling interval is 60 seconds.
Any changes made in the provisioned files stored in the GitHub repository are reflected in the Grafana database. ### Make changes in Grafana
The Grafana UI reads the database and updates the UI to reflect these changes.
Whenever you modify a dashboard directly from the UI, Grafana can commit changes to Git upon saving. You can configure settings to either enforce PR approvals before merging in your repository, or allow direct commits.
Grafana periodically polls GitHub at a regular interval to synchronize any changes. The default polling interval is 60 seconds, and you can change this setting in the Grafana UI.
- If you enable the [webhooks feature](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup/#configure-webhooks-and-image-rendering), repository notifications appear almost immediately.
- Without webhooks, Grafana polls for changes at the specified interval.
### Make changes in your GitHub repositories
With Git Sync, you can make changes in your provisioned files in GitHub and see them in Grafana. Automated workflows ensure those changes are automatically represented in the Grafana database by updating Git. The Grafana UI reads the database and updates the UI to reflect these changes.
## Known limitations
Git Sync is under development and the following limitations apply:
- You can only authenticate in GitHub using your Personal Access Token.
- Support for native Git, Git app, and other providers, such as GitLab or Bitbucket, is on the roadmap.
- If you're using Git Sync in Grafana Cloud, you can only sync specific folders for the moment. Git Sync will be available for your full instance soon.
- Restoring resources from the UI is currently not possible. As an alternative, you can restore dashboards directly in your GitHub repository by raising a PR, and they will be updated in Grafana.
## Common use cases ## Common use cases
Git Sync in Grafana lets you manage dashboards as code. You can use Git Sync in the following scenarios.
Because your dashboard JSON files are stored in GitHub, you and your team can version control, collaborate, and automate deployments efficiently.
### Version control and auditing ### Version control and auditing
Organizations can maintain a structured, version-controlled history of Grafana dashboards. Organizations can maintain a structured, version-controlled history of Grafana dashboards. The version control lets you revert to previous versions when necessary, compare modifications across commits, and ensure transparency in dashboard management.
The version control lets you revert to previous versions when necessary, compare modifications across commits, and ensure transparency in dashboard management.
Additionally, having a detailed history of changes enhances compliance efforts, as teams can generate audit logs that document who made changes, when they were made, and why. Additionally, having a detailed history of changes enhances compliance efforts, as teams can generate audit logs that document who made changes, when they were made, and why.
### Automated deployment and CI/CD integration ### Automated deployment and CI/CD integration
Teams can streamline their workflow by integrating dashboard updates into their CI/CD pipelines. Teams can streamline their workflow by integrating dashboard updates into their CI/CD pipelines. By pushing changes to GitHub, automated processes can trigger validation checks, test dashboard configurations, and deploy updates programmatically using the `grafanactl` CLI and Foundation SDK.
By pushing changes to GitHub, automated processes can trigger validation checks, test dashboard configurations, and deploy updates programmatically using the `grafanactl` CLI and Foundation SDK.
This reduces the risk of human errors, ensures consistency across environments, and enables a faster, more reliable release cycle for dashboards used in production monitoring and analytics. This reduces the risk of human errors, ensures consistency across environments, and enables a faster, more reliable release cycle for dashboards used in production monitoring and analytics.
### Collaborative dashboard development ### Collaborative dashboard development
With Git Sync, multiple users can work on dashboards simultaneously without overwriting each other's modifications. With Git Sync, multiple users can work on dashboards simultaneously without overwriting each other's modifications.
By leveraging pull requests and branch-based workflows, teams can submit changes for review before merging them into the main branch. This process not only improves quality control but also ensures that dashboards adhere to best practices and organizational standards. Additionally, GitHub's built-in discussion and review tools facilitate effective collaboration, making it easier to address feedback before changes go live. By leveraging pull requests and branch-based workflows, teams can submit changes for review before merging them into the main branch. This process not only improves quality control but also ensures that dashboards adhere to best practices and organizational standards.
Additionally, GitHub's built-in discussion and review tools facilitate effective collaboration, making it easier to address feedback before changes go live.
### Multi-environment synchronization ### Multi-environment synchronization
Enterprises managing multiple Grafana instances, such as development, staging, and production environments, can seamlessly sync dashboards across these instances. Enterprises managing multiple Grafana instances, such as development, staging, and production environments, can seamlessly sync dashboards across these instances. This ensures consistency in visualization and monitoring configurations, reducing discrepancies that might arise from manually managing dashboards in different environments.
This ensures consistency in visualization and monitoring configurations, reducing discrepancies that might arise from manually managing dashboards in different environments.
By using Git Sync, teams can automate deployments across environments, eliminating repetitive setup tasks and maintaining a standardized monitoring infrastructure across the organization. By using Git Sync, teams can automate deployments across environments, eliminating repetitive setup tasks and maintaining a standardized monitoring infrastructure across the organization.
### Disaster recovery and backup ### Disaster recovery and backup
By continuously syncing dashboards to GitHub, organizations can create an always-updated backup, ensuring dashboards are never lost due to accidental deletion or system failures. By continuously syncing dashboards to GitHub, organizations can create an always-updated backup, ensuring dashboards are never lost due to accidental deletion or system failures.
If an issue arises--such as a corrupted dashboard, unintended modification, or a system crash--teams can quickly restore the latest functional version from the Git repository.
This not only minimizes downtime but also adds a layer of resilience to Grafana monitoring setups, ensuring critical dashboards remain available when needed. If an issue arises, such as a corrupted dashboard, unintended modification, or a system crash, teams can quickly restore the latest functional version from the Git repository. This not only minimizes downtime but also adds a layer of resilience to Grafana monitoring setups, ensuring critical dashboards remain available when needed.
## Provision dashboards as code
Because dashboards are defined in JSON files, you can enable as-code workflows where the JSON file is an output from Go, TypeScript, or another coding language in the format of a dashboard schema.
To learn more about creating dashboards in a coding language to provision them for Git Sync, refer to the [Foundation SDK](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/foundation-sdk) documentation.

View File

@ -16,9 +16,10 @@ weight: 300
# Work with provisioned dashboards # Work with provisioned dashboards
{{< admonition type="caution" >}} {{< admonition type="caution" >}}
Git Sync and File path provisioning an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana. These features aren't available publicly in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9). Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud. Support and documentation are available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided. You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
Git Sync and local file provisioning are [experimental features](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided.
{{< /admonition >}} {{< /admonition >}}
@ -30,16 +31,17 @@ For more information, refer to the [Dashboards](https://grafana.com/docs/grafana
Dashboards and folders synchronized using Git Sync or a local file path are referred to as "provisioned" resources. Dashboards and folders synchronized using Git Sync or a local file path are referred to as "provisioned" resources.
Of the two experimental options, Git Sync is the recommended method for provisioning your dashboards. ### Git Sync provisioning
Of the two experimental options, **Git Sync** is the recommended method for provisioning your dashboards.
You can synchronize any new dashboards and changes to existing dashboards to your configured GitHub repository. You can synchronize any new dashboards and changes to existing dashboards to your configured GitHub repository.
If you push a change in the repository, those changes are mirrored in your Grafana instance. If you push a change in the repository, those changes are mirrored in your Grafana instance.
For more information on configuring Git Sync, refer to [Set up Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/).
For more information on configuring Git Sync, refer to [Introduction to Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/).
### Local path provisioning ### Local path provisioning
Using the local path provisioning makes files from a specified path available within Grafana. Local path provisioning makes files from a specified path available within Grafana, and any changes made in the configured local path are updated in Grafana. Note that these provisioned resources can only be modified in the local files and not within Grafana.
These provisioned resources can only be modified in the local files and not within Grafana.
Any changes made in the configured local path are updated in Grafana.
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup) to learn more about the version of local file provisioning in Grafana 12. Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup) to learn more about the version of local file provisioning in Grafana 12.
@ -114,9 +116,9 @@ Saving changes requires opening a pull request in your GitHub repository.
### Remove dashboards ### Remove dashboards
You can remove a provisioned dashboard by deleting the dashboard from the repository. You can remove a provisioned dashboard by deleting the dashboard from the repository. The Grafana UI updates when the changes from the GitHub repository sync.
Grafana updates when the changes from the GitHub repository sync. To restore a deleted dashboard, raise a PR directly in your GitHub repository. Restoring resources from the UI is currently not possible.
### Tips ### Tips
@ -128,9 +130,6 @@ Grafana updates when the changes from the GitHub repository sync.
## Manage dashboards provisioned with file provisioning ## Manage dashboards provisioned with file provisioning
To update any resources in the local path, you need to edit the files directly and then save them locally. To update any resources in the local path, you need to edit the files directly and then save them locally.
These changes are synchronized to Grafana. These changes are synchronized to Grafana. However, you can't create, edit, or delete these resources using the Grafana UI.
However, you can't create, edit, or delete these resources using the Grafana UI.
For more information, refer to [How it works](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/).
Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for configuration instructions. Refer to [Set up file provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/file-path-setup/) for configuration instructions.

View File

@ -19,20 +19,22 @@ weight: 400
# Manage provisioned repositories with Git Sync # Manage provisioned repositories with Git Sync
{{< admonition type="caution" >}} {{< admonition type="caution" >}}
Git Sync is an [experimental feature](https://grafana.com/docs/release-life-cycle/) introduced in Grafana v12 for open source and Enterprise editions. Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Enable the `provisioning` and `kubernetesDashboards` feature toggles in Grafana to use this feature. This feature is not publicly available in Grafana Cloud yet. Only the cloud-hosted version of GitHub (GitHub.com) is supported at this time. GitHub Enterprise is not yet compatible.
Sign up for Grafana Cloud Git Sync early access using [this form](https://forms.gle/WKkR3EVMcbqsNnkD9). Git Sync is available in [private preview](https://grafana.com/docs/release-life-cycle/) for Grafana Cloud, and is an [experimental feature](https://grafana.com/docs/release-life-cycle/) in Grafana v12 for open source and Enterprise editions.
Support and documentation are available but might be limited to enablement, configuration, and some troubleshooting. No SLAs are provided.
You can sign up to the private preview using the [Git Sync early access form](https://forms.gle/WKkR3EVMcbqsNnkD9).
{{< /admonition >}} {{< /admonition >}}
After you have set up Git Sync, you can synchronize any changes in your existing dashboards with your configured GitHub repository. Similarly, if you push a change in the repository, those changes are mirrored in your Grafana instance. After you have set up Git Sync, you can synchronize any changes you make in your existing provisioned folders in the UI with your configured GitHub repository. Similarly, if you push a change into your repository, those changes are mirrored in your Grafana instance.
## View current status of synchronization ## View current status of synchronization
Each repository synchronized with Git Sync has a dashboard that provides a summary of resources, health, pull status, webhook, sync jobs, resources, and files. When you synchronize a repository, Git Sync also creates a dashboard that provides a summary of resources, health, pull status, webhook, sync jobs, resources, and files.
Use the detailed information accessed in **View** to help troubleshoot and understand the health of your repository's connection with Grafana.
To view the current status, follow these steps. Use the **View** section in **Provisioning** to see detailed information about the current status of your sync, understand the health of your repository's connection with Grafana, and [troubleshoot](#troubleshoot-synchronization) possible issues:
1. Log in to your Grafana server with an account that has the Grafana Admin or Editor flag set. 1. Log in to your Grafana server with an account that has the Grafana Admin or Editor flag set.
1. Select **Administration** in the left-side menu and then **Provisioning**. 1. Select **Administration** in the left-side menu and then **Provisioning**.
@ -44,7 +46,7 @@ To view the current status, follow these steps.
Synchronizing resources from provisioned repositories into your Grafana instance pulls the resources into the selected folder. Existing dashboards with the same `uid` are overwritten. Synchronizing resources from provisioned repositories into your Grafana instance pulls the resources into the selected folder. Existing dashboards with the same `uid` are overwritten.
To sync changes from your dashboards with your Git repository: To sync changes from your Grafana dashboards with your Git repository:
1. From the left menu, select **Administration** > **Provisioning**. 1. From the left menu, select **Administration** > **Provisioning**.
1. Select **Pull** under the repository you want to sync. 1. Select **Pull** under the repository you want to sync.
@ -64,6 +66,12 @@ Refer to [Work with provisioned dashboards](../provisioned-dashboards) for infor
## Troubleshoot synchronization ## Troubleshoot synchronization
{{< admonition type="caution" >}}
Before you proceed to troubleshoot, understand the [known limitations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync/#known-limitations).
{{< /admonition >}}
Monitor the **View** status page for synchronization issues and status updates. Common events include: Monitor the **View** status page for synchronization issues and status updates. Common events include:
- Sync started - Sync started

View File

@ -71,9 +71,12 @@ test.describe(
await test.step('3.Edit and restore default groupBy', async () => { await test.step('3.Edit and restore default groupBy', async () => {
const dashboardPage = await gotoDashboardPage({ uid: DASHBOARD_UNDER_TEST }); const dashboardPage = await gotoDashboardPage({ uid: DASHBOARD_UNDER_TEST });
// Wait for the page to load
const groupByVariable = getGroupByInput(dashboardPage, selectors);
await expect(groupByVariable).toBeVisible();
const initialSelectedOptionsCount = await groupByValues.count(); const initialSelectedOptionsCount = await groupByValues.count();
const groupByVariable = getGroupByInput(dashboardPage, selectors);
await groupByVariable.click(); await groupByVariable.click();
const groupByOption = groupByOptions.nth(1); const groupByOption = groupByOptions.nth(1);

View File

@ -1856,11 +1856,6 @@
"count": 1 "count": 1
} }
}, },
"public/app/features/commandPalette/actions/recentScopesActions.ts": {
"react-hooks/rules-of-hooks": {
"count": 1
}
},
"public/app/features/commandPalette/actions/scopeActions.tsx": { "public/app/features/commandPalette/actions/scopeActions.tsx": {
"react-hooks/rules-of-hooks": { "react-hooks/rules-of-hooks": {
"count": 4 "count": 4
@ -3383,11 +3378,6 @@
"count": 1 "count": 1
} }
}, },
"public/app/features/transformers/editors/GroupByTransformerEditor.tsx": {
"@typescript-eslint/consistent-type-assertions": {
"count": 1
}
},
"public/app/features/transformers/editors/GroupToNestedTableTransformerEditor.tsx": { "public/app/features/transformers/editors/GroupToNestedTableTransformerEditor.tsx": {
"no-restricted-syntax": { "no-restricted-syntax": {
"count": 1 "count": 1
@ -4552,11 +4542,6 @@
"count": 1 "count": 1
} }
}, },
"public/app/plugins/panel/logs/types.ts": {
"no-barrel-files/no-barrel-files": {
"count": 1
}
},
"public/app/plugins/panel/nodeGraph/Edge.tsx": { "public/app/plugins/panel/nodeGraph/Edge.tsx": {
"@typescript-eslint/consistent-type-assertions": { "@typescript-eslint/consistent-type-assertions": {
"count": 1 "count": 1

2
go.mod
View File

@ -247,6 +247,7 @@ require (
github.com/grafana/grafana/apps/plugins v0.0.0 // @grafana/plugins-platform-backend github.com/grafana/grafana/apps/plugins v0.0.0 // @grafana/plugins-platform-backend
github.com/grafana/grafana/apps/preferences v0.0.0 // @grafana/grafana-app-platform-squad github.com/grafana/grafana/apps/preferences v0.0.0 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/apps/provisioning v0.0.0 // @grafana/grafana-app-platform-squad github.com/grafana/grafana/apps/provisioning v0.0.0 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana/apps/scope v0.0.0 // @grafana/grafana-operator-experience-squad
github.com/grafana/grafana/apps/secret v0.0.0 // @grafana/grafana-operator-experience-squad github.com/grafana/grafana/apps/secret v0.0.0 // @grafana/grafana-operator-experience-squad
github.com/grafana/grafana/apps/shorturl v0.0.0 // @grafana/sharing-squad github.com/grafana/grafana/apps/shorturl v0.0.0 // @grafana/sharing-squad
github.com/grafana/grafana/pkg/aggregator v0.0.0 // @grafana/grafana-app-platform-squad github.com/grafana/grafana/pkg/aggregator v0.0.0 // @grafana/grafana-app-platform-squad
@ -275,6 +276,7 @@ replace (
github.com/grafana/grafana/apps/plugins => ./apps/plugins github.com/grafana/grafana/apps/plugins => ./apps/plugins
github.com/grafana/grafana/apps/preferences => ./apps/preferences github.com/grafana/grafana/apps/preferences => ./apps/preferences
github.com/grafana/grafana/apps/provisioning => ./apps/provisioning github.com/grafana/grafana/apps/provisioning => ./apps/provisioning
github.com/grafana/grafana/apps/scope => ./apps/scope
github.com/grafana/grafana/apps/secret => ./apps/secret github.com/grafana/grafana/apps/secret => ./apps/secret
github.com/grafana/grafana/apps/shorturl => ./apps/shorturl github.com/grafana/grafana/apps/shorturl => ./apps/shorturl

View File

@ -18,6 +18,7 @@ use (
./apps/plugins ./apps/plugins
./apps/preferences ./apps/preferences
./apps/provisioning ./apps/provisioning
./apps/scope
./apps/secret ./apps/secret
./apps/shorturl ./apps/shorturl
./pkg/aggregator ./pkg/aggregator

View File

@ -90,6 +90,7 @@ grafana::codegen:run apps/dashboard/pkg
grafana::codegen:run apps/provisioning/pkg grafana::codegen:run apps/provisioning/pkg
grafana::codegen:run apps/folder/pkg grafana::codegen:run apps/folder/pkg
grafana::codegen:run apps/preferences/pkg grafana::codegen:run apps/preferences/pkg
grafana::codegen:run apps/scope/pkg
grafana::codegen:run apps/alerting/alertenrichment/pkg grafana::codegen:run apps/alerting/alertenrichment/pkg
if [ -d "pkg/extensions/apis" ]; then if [ -d "pkg/extensions/apis" ]; then

View File

@ -302,6 +302,7 @@
"@locker/near-membrane-shared-dom": "0.14.0", "@locker/near-membrane-shared-dom": "0.14.0",
"@msagl/core": "^1.1.19", "@msagl/core": "^1.1.19",
"@msagl/parser": "^1.1.19", "@msagl/parser": "^1.1.19",
"@openfeature/web-sdk": "^1.6.1",
"@opentelemetry/api": "1.9.0", "@opentelemetry/api": "1.9.0",
"@opentelemetry/exporter-collector": "0.25.0", "@opentelemetry/exporter-collector": "0.25.0",
"@opentelemetry/semantic-conventions": "1.37.0", "@opentelemetry/semantic-conventions": "1.37.0",

View File

@ -1,5 +1,12 @@
import { ThemeColors } from './createColors'; import { ThemeColors } from './createColors';
import { ThemeShadows } from './createShadows'; import { ThemeShadows } from './createShadows';
import type { Radii } from './createShape';
import type { ThemeSpacingTokens } from './createSpacing';
interface MenuComponentTokens {
borderRadius: keyof Radii;
padding: ThemeSpacingTokens;
}
/** @beta */ /** @beta */
export interface ThemeComponents { export interface ThemeComponents {
@ -53,6 +60,7 @@ export interface ThemeComponents {
rowHoverBackground: string; rowHoverBackground: string;
rowSelected: string; rowSelected: string;
}; };
menu: MenuComponentTokens;
} }
export function createComponents(colors: ThemeColors, shadows: ThemeShadows): ThemeComponents { export function createComponents(colors: ThemeColors, shadows: ThemeShadows): ThemeComponents {
@ -71,6 +79,11 @@ export function createComponents(colors: ThemeColors, shadows: ThemeShadows): Th
background: colors.mode === 'dark' ? colors.background.canvas : colors.background.primary, background: colors.mode === 'dark' ? colors.background.canvas : colors.background.primary,
}; };
const menu: MenuComponentTokens = {
borderRadius: 'default',
padding: 0.5,
};
return { return {
height: { height: {
sm: 3, sm: 3,
@ -114,5 +127,6 @@ export function createComponents(colors: ThemeColors, shadows: ThemeShadows): Th
rowHoverBackground: colors.action.hover, rowHoverBackground: colors.action.hover,
rowSelected: colors.action.selected, rowSelected: colors.action.selected,
}, },
menu,
}; };
} }

View File

@ -165,6 +165,8 @@ export type PluginExtensionOpenModalOptions = {
export type PluginExtensionEventHelpers<Context extends object = object> = { export type PluginExtensionEventHelpers<Context extends object = object> = {
context?: Readonly<Context>; context?: Readonly<Context>;
// The ID of the extension point that triggered this event
extensionPointId: string;
// Opens a modal dialog and renders the provided React component inside it // Opens a modal dialog and renders the provided React component inside it
openModal: (options: PluginExtensionOpenModalOptions) => void; openModal: (options: PluginExtensionOpenModalOptions) => void;
/** /**

View File

@ -58,6 +58,9 @@
"@grafana/faro-web-sdk": "^1.13.2", "@grafana/faro-web-sdk": "^1.13.2",
"@grafana/schema": "12.3.0-pre", "@grafana/schema": "12.3.0-pre",
"@grafana/ui": "12.3.0-pre", "@grafana/ui": "12.3.0-pre",
"@openfeature/core": "^1.9.0",
"@openfeature/ofrep-web-provider": "^0.3.3",
"@openfeature/web-sdk": "^1.6.1",
"@types/systemjs": "6.15.3", "@types/systemjs": "6.15.3",
"history": "4.10.1", "history": "4.10.1",
"lodash": "4.17.21", "lodash": "4.17.21",

View File

@ -27,3 +27,5 @@ export {
} from '../services/pluginExtensions/getObservablePluginLinks'; } from '../services/pluginExtensions/getObservablePluginLinks';
export { UserStorage } from '../utils/userStorage'; export { UserStorage } from '../utils/userStorage';
export { initOpenFeature, evaluateBooleanFlag } from './openFeature';

View File

@ -0,0 +1,33 @@
import { OFREPWebProvider } from '@openfeature/ofrep-web-provider';
import { OpenFeature } from '@openfeature/web-sdk';
import { FeatureToggles } from '@grafana/data';
import { config } from '../../config';
export type FeatureFlagName = keyof FeatureToggles;
export async function initOpenFeature() {
/**
* Note: Currently we don't have a way to override OpenFeature flags for tests or localStorage.
* A few improvements we could make:
* - When running in tests (unit or e2e?), we could use InMemoryProvider instead
* - Use Multi-Provider to combine InMemoryProvider (for localStorage) with OFREPWebProvider
* to allow for overrides https://github.com/open-feature/js-sdk-contrib/tree/main/libs/providers/multi-provider
*/
const ofProvider = new OFREPWebProvider({
baseUrl: '/apis/features.grafana.app/v0alpha1/namespaces/' + config.namespace,
pollInterval: -1, // disable polling
timeoutMs: 5_000,
});
await OpenFeature.setProviderAndWait(ofProvider, {
targetingKey: config.namespace,
namespace: config.namespace,
});
}
export function evaluateBooleanFlag(flagName: FeatureFlagName, defaultValue: boolean): boolean {
return OpenFeature.getClient().getBooleanValue(flagName, defaultValue);
}

View File

@ -41,6 +41,7 @@ export interface Options {
showCommonLabels: boolean; showCommonLabels: boolean;
showControls?: boolean; showControls?: boolean;
showLabels: boolean; showLabels: boolean;
showLogAttributes?: boolean;
showLogContextToggle: boolean; showLogContextToggle: boolean;
showTime: boolean; showTime: boolean;
sortOrder: common.LogsSortOrder; sortOrder: common.LogsSortOrder;

View File

@ -37,7 +37,7 @@ const listFoldersHandler = () =>
const limit = parseInt(url.searchParams.get('limit') ?? '1000', 10); const limit = parseInt(url.searchParams.get('limit') ?? '1000', 10);
const page = parseInt(url.searchParams.get('page') ?? '1', 10); const page = parseInt(url.searchParams.get('page') ?? '1', 10);
const tree = permission === 'Edit' ? mockTreeThatViewersCanEdit : mockTree; const tree = permission?.toLowerCase() === 'edit' ? mockTreeThatViewersCanEdit : mockTree;
// reconstruct a folder API response from the flat tree fixture // reconstruct a folder API response from the flat tree fixture
const folders = tree const folders = tree

View File

@ -92,7 +92,7 @@ export const CollapsableSection = ({
{loading ? ( {loading ? (
<Spinner className={styles.spinner} /> <Spinner className={styles.spinner} />
) : ( ) : (
<Icon name={isSectionOpen ? 'angle-up' : 'angle-down'} className={styles.icon} /> <Icon name={isSectionOpen ? 'angle-down' : 'angle-right'} className={styles.icon} />
)} )}
</button> </button>
<div className={styles.label} id={`collapse-label-${id}`} data-testid={headerDataTestId}> <div className={styles.label} id={`collapse-label-${id}`} data-testid={headerDataTestId}>
@ -107,17 +107,18 @@ export const CollapsableSection = ({
const collapsableSectionStyles = (theme: GrafanaTheme2) => ({ const collapsableSectionStyles = (theme: GrafanaTheme2) => ({
header: css({ header: css({
display: 'flex', display: 'flex',
alignItems: 'center',
cursor: 'pointer', cursor: 'pointer',
boxSizing: 'border-box', boxSizing: 'border-box',
flexDirection: 'row-reverse',
position: 'relative', position: 'relative',
justifyContent: 'space-between', justifyContent: 'flex-start',
fontSize: theme.typography.size.lg, fontSize: theme.typography.size.lg,
padding: `${theme.spacing(0.5)} 0`, padding: `${theme.spacing(0.5)} 0`,
'&:focus-within': getFocusStyles(theme), '&:focus-within': getFocusStyles(theme),
}), }),
button: css({ button: css({
all: 'unset', all: 'unset',
marginRight: theme.spacing(1),
'&:focus-visible': { '&:focus-visible': {
outline: 'none', outline: 'none',
outlineOffset: 'unset', outlineOffset: 'unset',
@ -141,6 +142,7 @@ const collapsableSectionStyles = (theme: GrafanaTheme2) => ({
}), }),
label: css({ label: css({
display: 'flex', display: 'flex',
flex: '1 1 auto',
fontWeight: theme.typography.fontWeightMedium, fontWeight: theme.typography.fontWeightMedium,
color: theme.colors.text.maxContrast, color: theme.colors.text.maxContrast,
}), }),

View File

@ -14,6 +14,9 @@ export interface ErrorBoundaryApi {
} }
interface Props { interface Props {
/** Name of the error boundary. Used when reporting errors in Faro. */
boundaryName?: string;
children: (r: ErrorBoundaryApi) => ReactNode; children: (r: ErrorBoundaryApi) => ReactNode;
/** Will re-render children after error if recover values changes */ /** Will re-render children after error if recover values changes */
dependencies?: unknown[]; dependencies?: unknown[];
@ -37,10 +40,15 @@ export class ErrorBoundary extends PureComponent<Props, State> {
}; };
componentDidCatch(error: Error, errorInfo: ErrorInfo) { componentDidCatch(error: Error, errorInfo: ErrorInfo) {
const logger = this.props.errorLogger ?? faro?.api?.pushError; if (this.props.errorLogger) {
this.props.errorLogger(error);
if (logger) { } else {
logger(error); faro?.api?.pushError(error, {
type: 'boundary',
context: {
source: this.props.boundaryName ?? 'unknown',
},
});
} }
this.setState({ error, errorInfo }); this.setState({ error, errorInfo });
@ -85,6 +93,9 @@ export class ErrorBoundary extends PureComponent<Props, State> {
* @public * @public
*/ */
export interface ErrorBoundaryAlertProps { export interface ErrorBoundaryAlertProps {
/** Name of the error boundary. Used when reporting errors in Faro. */
boundaryName?: string;
/** Title for the error boundary alert */ /** Title for the error boundary alert */
title?: string; title?: string;
@ -107,10 +118,10 @@ export class ErrorBoundaryAlert extends PureComponent<ErrorBoundaryAlertProps> {
}; };
render() { render() {
const { title, children, style, dependencies, errorLogger } = this.props; const { title, children, style, dependencies, errorLogger, boundaryName } = this.props;
return ( return (
<ErrorBoundary dependencies={dependencies} errorLogger={errorLogger}> <ErrorBoundary dependencies={dependencies} errorLogger={errorLogger} boundaryName={boundaryName}>
{({ error, errorInfo }) => { {({ error, errorInfo }) => {
if (!errorInfo) { if (!errorInfo) {
return children; return children;

View File

@ -25,6 +25,7 @@ export interface MenuProps extends React.HTMLAttributes<HTMLDivElement> {
const MenuComp = React.forwardRef<HTMLDivElement, MenuProps>( const MenuComp = React.forwardRef<HTMLDivElement, MenuProps>(
({ header, children, ariaLabel, onOpen, onClose, onKeyDown, ...otherProps }, forwardedRef) => { ({ header, children, ariaLabel, onOpen, onClose, onKeyDown, ...otherProps }, forwardedRef) => {
const styles = useStyles2(getStyles); const styles = useStyles2(getStyles);
const componentTokens = useComponentTokens();
const localRef = useRef<HTMLDivElement>(null); const localRef = useRef<HTMLDivElement>(null);
useImperativeHandle(forwardedRef, () => localRef.current!); useImperativeHandle(forwardedRef, () => localRef.current!);
@ -36,12 +37,11 @@ const MenuComp = React.forwardRef<HTMLDivElement, MenuProps>(
{...otherProps} {...otherProps}
aria-label={ariaLabel} aria-label={ariaLabel}
backgroundColor="elevated" backgroundColor="elevated"
borderRadius="default" borderRadius={componentTokens.borderRadius}
boxShadow="z3" boxShadow="z3"
display="inline-block" display="inline-block"
onKeyDown={handleKeys} onKeyDown={handleKeys}
paddingX={0.5} padding={componentTokens.padding}
paddingY={0.5}
ref={localRef} ref={localRef}
role="menu" role="menu"
tabIndex={-1} tabIndex={-1}
@ -70,6 +70,18 @@ export const Menu = Object.assign(MenuComp, {
Group: MenuGroup, Group: MenuGroup,
}); });
const useComponentTokens = () =>
useStyles2((theme: GrafanaTheme2) => {
const {
components: { menu },
} = theme;
return {
padding: menu.padding,
borderRadius: menu.borderRadius,
};
});
const getStyles = (theme: GrafanaTheme2) => { const getStyles = (theme: GrafanaTheme2) => {
return { return {
header: css({ header: css({

View File

@ -6,7 +6,7 @@ import { GrafanaTheme2, LinkTarget } from '@grafana/data';
import { t } from '@grafana/i18n'; import { t } from '@grafana/i18n';
import { useStyles2 } from '../../themes/ThemeContext'; import { useStyles2 } from '../../themes/ThemeContext';
import { getFocusStyles } from '../../themes/mixins'; import { getFocusStyles, getInternalRadius } from '../../themes/mixins';
import { IconName } from '../../types/icon'; import { IconName } from '../../types/icon';
import { Icon } from '../Icon/Icon'; import { Icon } from '../Icon/Icon';
import { Stack } from '../Layout/Stack/Stack'; import { Stack } from '../Layout/Stack/Stack';
@ -213,6 +213,8 @@ export const MenuItem = React.memo(
MenuItem.displayName = 'MenuItem'; MenuItem.displayName = 'MenuItem';
const getStyles = (theme: GrafanaTheme2) => { const getStyles = (theme: GrafanaTheme2) => {
const menuPadding = theme.components.menu.padding * theme.spacing.gridSize;
return { return {
item: css({ item: css({
background: 'none', background: 'none',
@ -225,7 +227,7 @@ const getStyles = (theme: GrafanaTheme2) => {
justifyContent: 'center', justifyContent: 'center',
padding: theme.spacing(0.5, 1.5), padding: theme.spacing(0.5, 1.5),
minHeight: theme.spacing(4), minHeight: theme.spacing(4),
borderRadius: theme.shape.radius.default, borderRadius: getInternalRadius(theme, menuPadding, { parentBorderWidth: 0 }),
margin: 0, margin: 0,
border: 'none', border: 'none',
width: '100%', width: '100%',

View File

@ -11,9 +11,7 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/grafana/grafana/pkg/services/featuremgmt"
_ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof" _ "github.com/grafana/pyroscope-go/godeltaprof/http/pprof"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"github.com/grafana/grafana/pkg/api" "github.com/grafana/grafana/pkg/api"
@ -21,8 +19,10 @@ import (
"github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/metrics" "github.com/grafana/grafana/pkg/infra/metrics"
"github.com/grafana/grafana/pkg/infra/process" "github.com/grafana/grafana/pkg/infra/process"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/server" "github.com/grafana/grafana/pkg/server"
"github.com/grafana/grafana/pkg/services/apiserver/standalone" "github.com/grafana/grafana/pkg/services/apiserver/standalone"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/setting"
) )
@ -111,6 +111,11 @@ func RunServer(opts standalone.BuildInfo, cli *cli.Context) error {
return err return err
} }
// Initialize tracing early to ensure it's always available for other services
if err := tracing.InitTracing(cfg); err != nil {
return err
}
s, err := server.Initialize( s, err := server.Initialize(
cli.Context, cli.Context,
cfg, cfg,

View File

@ -57,4 +57,5 @@ import (
_ "github.com/grafana/tempo/pkg/traceql" _ "github.com/grafana/tempo/pkg/traceql"
_ "github.com/grafana/grafana/apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1" _ "github.com/grafana/grafana/apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1"
_ "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1"
) )

View File

@ -11,6 +11,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
jaegerpropagator "go.opentelemetry.io/contrib/propagators/jaeger" jaegerpropagator "go.opentelemetry.io/contrib/propagators/jaeger"
"go.opentelemetry.io/contrib/samplers/jaegerremote" "go.opentelemetry.io/contrib/samplers/jaegerremote"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
@ -27,11 +29,9 @@ import (
"go.opentelemetry.io/otel/trace/noop" "go.opentelemetry.io/otel/trace/noop"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
"github.com/grafana/grafana/pkg/apimachinery/errutil" "github.com/grafana/grafana/pkg/apimachinery/errutil"
"github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/setting"
) )
const ( const (
@ -105,6 +105,23 @@ func ProvideService(tracingCfg *TracingConfig) (*TracingService, error) {
return ots, nil return ots, nil
} }
// InitTracing initializes the tracing service with the provided configuration.
// Used to initialize tracing early to ensure it's always available for other
// services, outside of the wire context.
func InitTracing(cfg *setting.Cfg) error {
tracingCfg, err := ParseTracingConfig(cfg)
if err != nil {
return fmt.Errorf("parse tracing config: %w", err)
}
_, err = ProvideService(tracingCfg)
if err != nil {
return fmt.Errorf("initialize tracing: %w", err)
}
return nil
}
func NewNoopTracerService() *TracingService { func NewNoopTracerService() *TracingService {
tp := &noopTracerProvider{TracerProvider: noop.NewTracerProvider()} tp := &noopTracerProvider{TracerProvider: noop.NewTracerProvider()}
otel.SetTracerProvider(tp) otel.SetTracerProvider(tp)

View File

@ -0,0 +1,35 @@
package preferences
import (
"context"
"fmt"
"k8s.io/apiserver/pkg/admission"
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
)
func (b *APIBuilder) Mutate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) {
switch a.GetOperation() {
case admission.Create, admission.Update:
// ignore anything that is not CREATE | UPDATE
default:
return nil
}
obj := a.GetObject()
if obj == nil {
return nil
}
switch a.GetResource().Resource {
case "stars":
stars, ok := obj.(*preferences.Stars)
if !ok {
return fmt.Errorf("expected stars object: (%T)", obj)
}
stars.Spec.Normalize()
return nil
}
return nil
}

View File

@ -26,8 +26,11 @@ func mustTemplate(filename string) *template.Template {
// Templates. // Templates.
var ( var (
sqlStarsQuery = mustTemplate("sql_stars_query.sql") sqlDashboardStarsQuery = mustTemplate("sql_dashboard_stars.sql")
sqlStarsRV = mustTemplate("sql_stars_rv.sql") sqlDashboardStarsRV = mustTemplate("sql_dashboard_stars_rv.sql")
sqlHistoryStarsQuery = mustTemplate("sql_history_stars.sql")
sqlHistoryStarsInsert = mustTemplate("sql_history_stars_insert.sql")
sqlHistoryStarsDelete = mustTemplate("sql_history_stars_delete.sql")
sqlPreferencesQuery = mustTemplate("sql_preferences_query.sql") sqlPreferencesQuery = mustTemplate("sql_preferences_query.sql")
sqlPreferencesRV = mustTemplate("sql_preferences_rv.sql") sqlPreferencesRV = mustTemplate("sql_preferences_rv.sql")
sqlTeams = mustTemplate("sql_teams.sql") sqlTeams = mustTemplate("sql_teams.sql")
@ -38,9 +41,14 @@ type starQuery struct {
OrgID int64 // >= 1 if UserID != "" OrgID int64 // >= 1 if UserID != ""
UserUID string UserUID string
UserID int64 // for stars
QueryUIDs []string
QueryUID string
StarTable string StarTable string
UserTable string UserTable string
QueryHistoryStarsTable string
QueryHistoryTable string
} }
func (r starQuery) Validate() error { func (r starQuery) Validate() error {
@ -59,6 +67,8 @@ func newStarQueryReq(sql *legacysql.LegacyDatabaseHelper, user string, orgId int
StarTable: sql.Table("star"), StarTable: sql.Table("star"),
UserTable: sql.Table("user"), UserTable: sql.Table("user"),
QueryHistoryStarsTable: sql.Table("query_history_star"),
QueryHistoryTable: sql.Table("query_history"),
} }
} }

View File

@ -23,6 +23,15 @@ func TestStarsQueries(t *testing.T) {
return &v return &v
} }
getHistoryReq := func(orgId int64, userId int64, stars []string, star string) sqltemplate.SQLTemplate {
v := newStarQueryReq(nodb, "", orgId)
v.UserID = userId
v.QueryUIDs = stars
v.QueryUID = star
v.SQLTemplate = mocks.NewTestingSQLTemplate()
return &v
}
getPreferencesQuery := func(orgId int64, cb func(q *preferencesQuery)) sqltemplate.SQLTemplate { getPreferencesQuery := func(orgId int64, cb func(q *preferencesQuery)) sqltemplate.SQLTemplate {
v := newPreferencesQueryReq(nodb, orgId) v := newPreferencesQueryReq(nodb, orgId)
v.SQLTemplate = mocks.NewTestingSQLTemplate() v.SQLTemplate = mocks.NewTestingSQLTemplate()
@ -40,7 +49,7 @@ func TestStarsQueries(t *testing.T) {
RootDir: "testdata", RootDir: "testdata",
SQLTemplatesFS: sqlTemplatesFS, SQLTemplatesFS: sqlTemplatesFS,
Templates: map[*template.Template][]mocks.TemplateTestCase{ Templates: map[*template.Template][]mocks.TemplateTestCase{
sqlStarsQuery: { sqlDashboardStarsQuery: {
{ {
Name: "all", Name: "all",
Data: getStarQuery(0, ""), Data: getStarQuery(0, ""),
@ -54,12 +63,42 @@ func TestStarsQueries(t *testing.T) {
Data: getStarQuery(3, "abc"), Data: getStarQuery(3, "abc"),
}, },
}, },
sqlStarsRV: { sqlDashboardStarsRV: {
{ {
Name: "get", Name: "get",
Data: getStarQuery(0, ""), Data: getStarQuery(0, ""),
}, },
}, },
sqlHistoryStarsQuery: {
{
Name: "user",
Data: getStarQuery(1, "abc"),
},
},
sqlHistoryStarsQuery: {
{
Name: "org",
Data: getStarQuery(1, ""),
},
},
sqlHistoryStarsInsert: {
{
Name: "add star",
Data: getHistoryReq(1, 3, nil, "XXX"),
},
},
sqlHistoryStarsDelete: {
{
Name: "remove star",
Data: getHistoryReq(1, 3, []string{"xxx", "yyy"}, ""),
},
},
sqlHistoryStarsDelete: {
{
Name: "remove all star",
Data: getHistoryReq(1, 3, nil, ""),
},
},
sqlPreferencesQuery: { sqlPreferencesQuery: {
{ {
Name: "all", Name: "all",

View File

@ -12,6 +12,7 @@ import (
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1" preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity" "github.com/grafana/grafana/pkg/apimachinery/identity"
pref "github.com/grafana/grafana/pkg/services/preference" pref "github.com/grafana/grafana/pkg/services/preference"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/storage/legacysql" "github.com/grafana/grafana/pkg/storage/legacysql"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate" "github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
) )
@ -58,13 +59,16 @@ func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user str
req := newStarQueryReq(sql, user, orgId) req := newStarQueryReq(sql, user, orgId)
q, err := sqltemplate.Execute(sqlStarsQuery, req) q, err := sqltemplate.Execute(sqlDashboardStarsQuery, req)
if err != nil { if err != nil {
return nil, 0, fmt.Errorf("execute template %q: %w", sqlStarsQuery.Name(), err) return nil, 0, fmt.Errorf("execute template %q: %w", sqlDashboardStarsQuery.Name(), err)
} }
sess := sql.DB.GetSqlxSession() sess := sql.DB.GetSqlxSession()
rows, err := sess.Query(ctx, q, req.GetArgs()...) rows, err := sess.Query(ctx, q, req.GetArgs()...)
if err != nil {
return nil, 0, err
}
defer func() { defer func() {
if rows != nil { if rows != nil {
_ = rows.Close() _ = rows.Close()
@ -111,7 +115,7 @@ func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user str
// Find the RV unless it is a user query // Find the RV unless it is a user query
if userUID == "" { if userUID == "" {
req.Reset() req.Reset()
q, err = sqltemplate.Execute(sqlStarsRV, req) q, err = sqltemplate.Execute(sqlDashboardStarsRV, req)
if err != nil { if err != nil {
return nil, 0, fmt.Errorf("execute template %q: %w", sqlPreferencesRV.Name(), err) return nil, 0, fmt.Errorf("execute template %q: %w", sqlPreferencesRV.Name(), err)
} }
@ -132,6 +136,90 @@ func (s *LegacySQL) getDashboardStars(ctx context.Context, orgId int64, user str
return stars, updated.UnixMilli(), err return stars, updated.UnixMilli(), err
} }
func (s *LegacySQL) getHistoryStars(ctx context.Context, orgId int64, user string) (map[string][]string, error) {
sql, err := s.db(ctx)
if err != nil {
return nil, err
}
req := newStarQueryReq(sql, user, orgId)
q, err := sqltemplate.Execute(sqlHistoryStarsQuery, req)
if err != nil {
return nil, fmt.Errorf("execute template %q: %w", sqlHistoryStarsQuery.Name(), err)
}
sess := sql.DB.GetSqlxSession()
rows, err := sess.Query(ctx, q, req.GetArgs()...)
if err != nil {
return nil, err
}
defer func() {
if rows != nil {
_ = rows.Close()
}
}()
last := user
res := make(map[string][]string)
buffer := make([]string, 0, 10)
var uid string
for rows.Next() {
err := rows.Scan(&uid, &user)
if err != nil {
return nil, err
}
if user != last && len(buffer) > 0 {
res[last] = buffer
buffer = make([]string, 0, 10)
}
buffer = append(buffer, uid)
last = user
}
res[last] = buffer
return res, nil
}
func (s *LegacySQL) removeHistoryStar(ctx context.Context, user *user.User, stars []string) error {
sql, err := s.db(ctx)
if err != nil {
return err
}
req := newStarQueryReq(sql, "", user.OrgID)
req.UserID = user.ID
if len(stars) > 0 {
req.QueryUIDs = stars
}
q, err := sqltemplate.Execute(sqlHistoryStarsDelete, req)
if err != nil {
return fmt.Errorf("execute template %q: %w", sqlHistoryStarsDelete.Name(), err)
}
sess := sql.DB.GetSqlxSession()
_, err = sess.Exec(ctx, q, req.GetArgs()...)
return err
}
func (s *LegacySQL) addHistoryStar(ctx context.Context, user *user.User, star string) error {
sql, err := s.db(ctx)
if err != nil {
return err
}
req := newStarQueryReq(sql, "", user.OrgID)
req.UserID = user.ID
req.QueryUID = star
q, err := sqltemplate.Execute(sqlHistoryStarsDelete, req)
if err != nil {
return fmt.Errorf("execute template %q: %w", sqlHistoryStarsDelete.Name(), err)
}
sess := sql.DB.GetSqlxSession()
_, err = sess.Exec(ctx, q, req.GetArgs()...)
return err
}
// List all defined preferences in an org (valid for admin users only) // List all defined preferences in an org (valid for admin users only)
func (s *LegacySQL) listPreferences(ctx context.Context, func (s *LegacySQL) listPreferences(ctx context.Context,
ns string, orgId int64, ns string, orgId int64,

View File

@ -0,0 +1,9 @@
SELECT s.query_uid, u.uid as user_uid
FROM {{ .Ident .QueryHistoryStarsTable }} as s
JOIN {{ .Ident .QueryHistoryTable }} as h ON s.query_uid = h.uid
JOIN {{ .Ident .UserTable }} as u ON s.user_id = u.id
WHERE s.org_id = {{ .Arg .OrgID }}
{{ if .UserUID }}
AND u.uid = {{ .Arg .UserUID }}
{{ end }}
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc

View File

@ -0,0 +1,6 @@
DELETE FROM {{ .Ident .QueryHistoryStarsTable }}
WHERE org_id = {{ .Arg .OrgID }}
AND user_id = {{ .Arg .UserID }}
{{ if .QueryUIDs }}
AND query_uid IN ({{ .ArgList .QueryUIDs }})
{{ end }}

View File

@ -0,0 +1,4 @@
INSERT INTO {{ .Ident .QueryHistoryStarsTable }}
( query_uid, user_id, org_id )
VALUES
( {{ .Arg .QueryUID }}, {{ .Arg .UserID }}, {{ .Arg .OrgID }} )

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"math/rand" "math/rand"
"slices"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -12,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/internalversion" "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/registry/rest"
"k8s.io/utils/ptr" "k8s.io/utils/ptr"
@ -107,8 +109,13 @@ func (s *DashboardStarsStorage) List(ctx context.Context, options *internalversi
if err != nil { if err != nil {
return nil, err return nil, err
} }
history, err := s.sql.getHistoryStars(ctx, ns.OrgID, "")
if err != nil {
return nil, err
}
for _, v := range found { for _, v := range found {
list.Items = append(list.Items, asStarsResource(s.namespacer(v.OrgID), &v)) list.Items = append(list.Items,
asStarsResource(s.namespacer(v.OrgID), &v, history[v.UserUID]))
} }
if rv > 0 { if rv > 0 {
list.ResourceVersion = strconv.FormatInt(rv, 10) list.ResourceVersion = strconv.FormatInt(rv, 10)
@ -141,19 +148,25 @@ func (s *DashboardStarsStorage) Get(ctx context.Context, name string, options *m
if err != nil { if err != nil {
return nil, err return nil, err
} }
history, err := s.sql.getHistoryStars(ctx, ns.OrgID, owner.Identifier)
if err != nil {
return nil, err
}
if len(found) == 0 || len(found[0].Dashboards) == 0 { if len(found) == 0 || len(found[0].Dashboards) == 0 {
return nil, apiserrors.NewNotFound(preferences.StarsResourceInfo.GroupResource(), name) return nil, apiserrors.NewNotFound(preferences.StarsResourceInfo.GroupResource(), name)
} }
obj := asStarsResource(ns.Value, &found[0]) obj := asStarsResource(ns.Value, &found[0], history[owner.Identifier])
return &obj, nil return &obj, nil
} }
func getDashboardStars(stars *preferences.Stars) []string { func getStars(stars *preferences.Stars, gk schema.GroupKind) []string {
if stars == nil || len(stars.Spec.Resource) == 0 { if stars == nil || len(stars.Spec.Resource) == 0 {
return []string{} return []string{}
} }
for _, r := range stars.Spec.Resource { for _, r := range stars.Spec.Resource {
if r.Group == "dashboard.grafana.app" && r.Kind == "Dashboard" { if r.Group == gk.Group && r.Kind == gk.Kind {
return r.Names return r.Names
} }
} }
@ -161,7 +174,7 @@ func getDashboardStars(stars *preferences.Stars) []string {
} }
// Create implements rest.Creater. // Create implements rest.Creater.
func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Stars, old *preferences.Stars) (runtime.Object, error) { func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Stars) (runtime.Object, error) {
ns, owner, err := getNamespaceAndOwner(ctx, obj.Name) ns, owner, err := getNamespaceAndOwner(ctx, obj.Name)
if err != nil { if err != nil {
return nil, err return nil, err
@ -177,7 +190,7 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Star
return nil, fmt.Errorf("namespace mismatch") return nil, fmt.Errorf("namespace mismatch")
} }
stars := getDashboardStars(obj) stars := getStars(obj, schema.GroupKind{Group: "dashboard.grafana.app", Kind: "Dashboard"})
if len(stars) == 0 { if len(stars) == 0 {
err = s.stars.DeleteByUser(ctx, user.ID) err = s.stars.DeleteByUser(ctx, user.ID)
return &preferences.Stars{ObjectMeta: metav1.ObjectMeta{ return &preferences.Stars{ObjectMeta: metav1.ObjectMeta{
@ -232,6 +245,31 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *preferences.Star
changed = true changed = true
} }
// Apply history stars
stars = getStars(obj, schema.GroupKind{Group: "history.grafana.app", Kind: "Query"})
res, err := s.sql.getHistoryStars(ctx, user.OrgID, user.UID)
if err != nil {
return nil, err
}
history := res[user.UID]
if !slices.Equal(stars, history) {
changed = true
if len(stars) == 0 {
err = s.sql.removeHistoryStar(ctx, user, nil)
if err != nil {
return nil, err
}
} else {
added, removed, _ := preferences.Changes(history, stars)
if len(removed) > 0 {
_ = s.sql.removeHistoryStar(ctx, user, nil)
}
for _, v := range added {
_ = s.sql.addHistoryStar(ctx, user, v) // one at a time so duplicates do not fail everything
}
}
}
if changed { if changed {
return s.Get(ctx, obj.Name, &metav1.GetOptions{}) return s.Get(ctx, obj.Name, &metav1.GetOptions{})
} }
@ -245,7 +283,7 @@ func (s *DashboardStarsStorage) Create(ctx context.Context, obj runtime.Object,
return nil, fmt.Errorf("expected stars object") return nil, fmt.Errorf("expected stars object")
} }
return s.write(ctx, stars, nil) return s.write(ctx, stars)
} }
// Update implements rest.Updater. // Update implements rest.Updater.
@ -265,13 +303,13 @@ func (s *DashboardStarsStorage) Update(ctx context.Context, name string, objInfo
return nil, false, fmt.Errorf("expected stars object") return nil, false, fmt.Errorf("expected stars object")
} }
obj, err = s.write(ctx, stars, old.(*preferences.Stars)) obj, err = s.write(ctx, stars)
return obj, false, err return obj, false, err
} }
// Delete implements rest.GracefulDeleter. // Delete implements rest.GracefulDeleter.
func (s *DashboardStarsStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { func (s *DashboardStarsStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
obj, err := s.write(ctx, &preferences.Stars{ObjectMeta: metav1.ObjectMeta{Name: name}}, nil) obj, err := s.write(ctx, &preferences.Stars{ObjectMeta: metav1.ObjectMeta{Name: name}})
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }
@ -283,8 +321,8 @@ func (s *DashboardStarsStorage) DeleteCollection(ctx context.Context, deleteVali
return nil, fmt.Errorf("not implemented yet") return nil, fmt.Errorf("not implemented yet")
} }
func asStarsResource(ns string, v *dashboardStars) preferences.Stars { func asStarsResource(ns string, v *dashboardStars, history []string) preferences.Stars {
return preferences.Stars{ stars := preferences.Stars{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("user-%s", v.UserUID), Name: fmt.Sprintf("user-%s", v.UserUID),
Namespace: ns, Namespace: ns,
@ -299,4 +337,13 @@ func asStarsResource(ns string, v *dashboardStars) preferences.Stars {
}}, }},
}, },
} }
if len(history) > 0 {
stars.Spec.Resource = append(stars.Spec.Resource, preferences.StarsResource{
Group: "history.grafana.app",
Kind: "Query",
Names: history,
})
}
stars.Spec.Normalize()
return stars
} }

View File

@ -0,0 +1,6 @@
SELECT s.query_uid, u.uid as user_uid
FROM `grafana`.`query_history_star` as s
JOIN `grafana`.`query_history` as h ON s.query_uid = h.uid
JOIN `grafana`.`user` as u ON s.user_id = u.id
WHERE s.org_id = 1
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc

View File

@ -0,0 +1,3 @@
DELETE FROM `grafana`.`query_history_star`
WHERE org_id = 1
AND user_id = 3

View File

@ -0,0 +1,4 @@
INSERT INTO `grafana`.`query_history_star`
( query_uid, user_id, org_id )
VALUES
( 'XXX', 3, 1 )

View File

@ -0,0 +1,6 @@
SELECT s.query_uid, u.uid as user_uid
FROM "grafana"."query_history_star" as s
JOIN "grafana"."query_history" as h ON s.query_uid = h.uid
JOIN "grafana"."user" as u ON s.user_id = u.id
WHERE s.org_id = 1
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc

View File

@ -0,0 +1,3 @@
DELETE FROM "grafana"."query_history_star"
WHERE org_id = 1
AND user_id = 3

View File

@ -0,0 +1,4 @@
INSERT INTO "grafana"."query_history_star"
( query_uid, user_id, org_id )
VALUES
( 'XXX', 3, 1 )

View File

@ -0,0 +1,6 @@
SELECT s.query_uid, u.uid as user_uid
FROM "grafana"."query_history_star" as s
JOIN "grafana"."query_history" as h ON s.query_uid = h.uid
JOIN "grafana"."user" as u ON s.user_id = u.id
WHERE s.org_id = 1
ORDER BY s.org_id asc, s.user_id asc, s.query_uid asc

View File

@ -0,0 +1,3 @@
DELETE FROM "grafana"."query_history_star"
WHERE org_id = 1
AND user_id = 3

View File

@ -0,0 +1,4 @@
INSERT INTO "grafana"."query_history_star"
( query_uid, user_id, org_id )
VALUES
( 'XXX', 3, 1 )

View File

@ -29,7 +29,10 @@ import (
"github.com/grafana/grafana/pkg/storage/legacysql" "github.com/grafana/grafana/pkg/storage/legacysql"
) )
var _ builder.APIGroupBuilder = (*APIBuilder)(nil) var (
_ builder.APIGroupBuilder = (*APIBuilder)(nil)
_ builder.APIGroupMutation = (*APIBuilder)(nil)
)
type APIBuilder struct { type APIBuilder struct {
authorizer authorizer.Authorizer authorizer authorizer.Authorizer
@ -112,7 +115,7 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI
if err != nil { if err != nil {
return err return err
} }
stars = &starStorage{store: stars} // wrap List so we only return one value stars = &starStorage{Storage: stars} // wrap List so we only return one value
if b.legacyStars != nil && opts.DualWriteBuilder != nil { if b.legacyStars != nil && opts.DualWriteBuilder != nil {
stars, err = opts.DualWriteBuilder(resource.GroupResource(), b.legacyStars, stars) stars, err = opts.DualWriteBuilder(resource.GroupResource(), b.legacyStars, stars)
if err != nil { if err != nil {

View File

@ -6,7 +6,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/internalversion" "k8s.io/apimachinery/pkg/apis/meta/internalversion"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/rest"
authlib "github.com/grafana/authlib/types" authlib "github.com/grafana/authlib/types"
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1" preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
@ -17,7 +16,7 @@ import (
var _ grafanarest.Storage = (*starStorage)(nil) var _ grafanarest.Storage = (*starStorage)(nil)
type starStorage struct { type starStorage struct {
store grafanarest.Storage grafanarest.Storage
} }
// When using list, we really just want to get the value for the single user // When using list, we really just want to get the value for the single user
@ -34,7 +33,7 @@ func (s *starStorage) List(ctx context.Context, options *internalversion.ListOpt
// Get the single user stars // Get the single user stars
case authlib.TypeUser: case authlib.TypeUser:
stars := &preferences.StarsList{} stars := &preferences.StarsList{}
obj, _ := s.store.Get(ctx, "user-"+user.GetIdentifier(), &v1.GetOptions{}) obj, _ := s.Get(ctx, "user-"+user.GetIdentifier(), &v1.GetOptions{})
if obj != nil { if obj != nil {
s, ok := obj.(*preferences.Stars) s, ok := obj.(*preferences.Stars)
if ok { if ok {
@ -44,61 +43,6 @@ func (s *starStorage) List(ctx context.Context, options *internalversion.ListOpt
return stars, nil return stars, nil
default: default:
return s.store.List(ctx, options) return s.Storage.List(ctx, options)
} }
} }
// ConvertToTable implements rest.Storage.
func (s *starStorage) ConvertToTable(ctx context.Context, obj runtime.Object, tableOptions runtime.Object) (*v1.Table, error) {
return s.store.ConvertToTable(ctx, obj, tableOptions)
}
// Create implements rest.Storage.
func (s *starStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *v1.CreateOptions) (runtime.Object, error) {
return s.store.Create(ctx, obj, createValidation, options)
}
// Delete implements rest.Storage.
func (s *starStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *v1.DeleteOptions) (runtime.Object, bool, error) {
return s.store.Delete(ctx, name, deleteValidation, options)
}
// DeleteCollection implements rest.Storage.
func (s *starStorage) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *v1.DeleteOptions, listOptions *internalversion.ListOptions) (runtime.Object, error) {
return s.store.DeleteCollection(ctx, deleteValidation, options, listOptions)
}
// Destroy implements rest.Storage.
func (s *starStorage) Destroy() {
s.store.Destroy()
}
// Get implements rest.Storage.
func (s *starStorage) Get(ctx context.Context, name string, options *v1.GetOptions) (runtime.Object, error) {
return s.store.Get(ctx, name, options)
}
// GetSingularName implements rest.Storage.
func (s *starStorage) GetSingularName() string {
return s.store.GetSingularName()
}
// NamespaceScoped implements rest.Storage.
func (s *starStorage) NamespaceScoped() bool {
return s.store.NamespaceScoped()
}
// New implements rest.Storage.
func (s *starStorage) New() runtime.Object {
return s.store.New()
}
// NewList implements rest.Storage.
func (s *starStorage) NewList() runtime.Object {
return s.store.NewList()
}
// Update implements rest.Storage.
func (s *starStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *v1.UpdateOptions) (runtime.Object, bool, error) {
return s.store.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options)
}

View File

@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"slices"
"strings" "strings"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -110,11 +109,10 @@ func (r *starsREST) Connect(ctx context.Context, name string, _ runtime.Object,
return return
} }
if !apply(&obj.Spec, item, remove) { if remove {
responder.Object(http.StatusNoContent, &v1.Status{ obj.Spec.Remove(item.group, item.kind, item.id)
Code: http.StatusNoContent, } else {
}) obj.Spec.Add(item.group, item.kind, item.id)
return
} }
if len(obj.Spec.Resource) == 0 { if len(obj.Spec.Resource) == 0 {
@ -128,9 +126,7 @@ func (r *starsREST) Connect(ctx context.Context, name string, _ runtime.Object,
responder.Error(err) responder.Error(err)
return return
} }
responder.Object(http.StatusOK, &v1.Status{ responder.Object(http.StatusOK, &v1.Status{Code: http.StatusOK})
Code: http.StatusOK,
})
}), nil }), nil
} }
@ -151,49 +147,3 @@ func itemFromPath(urlPath, prefix string) (starItem, error) {
id: parts[2], id: parts[2],
}, nil }, nil
} }
func apply(spec *preferences.StarsSpec, item starItem, remove bool) bool {
var stars *preferences.StarsResource
for idx, v := range spec.Resource {
if v.Group == item.group && v.Kind == item.kind {
stars = &spec.Resource[idx]
}
}
if stars == nil {
if remove {
return false
}
spec.Resource = append(spec.Resource, preferences.StarsResource{
Group: item.group,
Kind: item.kind,
Names: []string{},
})
stars = &spec.Resource[len(spec.Resource)-1]
}
idx := slices.Index(stars.Names, item.id)
if idx < 0 { // not found
if remove {
return false
}
stars.Names = append(stars.Names, item.id)
} else if remove {
stars.Names = append(stars.Names[:idx], stars.Names[idx+1:]...)
} else {
return false
}
slices.Sort(stars.Names)
// Remove the slot if only one value
if len(stars.Names) == 0 {
tmp := preferences.StarsSpec{}
for _, v := range spec.Resource {
if v.Group == item.group && v.Kind == item.kind {
continue
}
tmp.Resource = append(tmp.Resource, v)
}
spec.Resource = tmp.Resource
}
return true
}

View File

@ -4,194 +4,9 @@ import (
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
preferences "github.com/grafana/grafana/apps/preferences/pkg/apis/preferences/v1alpha1"
) )
func TestStarsWrite(t *testing.T) { func TestStarsWrite(t *testing.T) {
t.Run("apply", func(t *testing.T) {
tests := []struct {
name string
spec preferences.StarsSpec
item starItem
remove bool
changed bool
expect preferences.StarsSpec
}{{
name: "add to an existing array",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a", "b", "c"},
}},
},
item: starItem{
group: "g",
kind: "k",
id: "x",
},
remove: false,
changed: true,
expect: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a", "b", "c", "x"}, // added "x"
}},
},
}, {
name: "remove from an existing array",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a", "b", "c"},
}},
},
item: starItem{
group: "g",
kind: "k",
id: "b",
},
remove: true,
changed: true,
expect: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a", "c"}, // removed "b"
}},
},
}, {
name: "add to empty spec",
spec: preferences.StarsSpec{},
item: starItem{
group: "g",
kind: "k",
id: "a",
},
remove: false,
changed: true,
expect: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a"},
}},
},
}, {
name: "remove item that does not exist",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"x"},
}},
},
item: starItem{
group: "g",
kind: "k",
id: "a",
},
remove: true,
changed: false,
}, {
name: "add item that already exist",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"x"},
}},
},
item: starItem{
group: "g",
kind: "k",
id: "x",
},
remove: false,
changed: false,
}, {
name: "remove from empty",
spec: preferences.StarsSpec{},
item: starItem{
group: "g",
kind: "k",
id: "a",
},
remove: true,
changed: false,
}, {
name: "remove item that does not exist",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a", "b", "c"},
}},
},
item: starItem{
group: "g",
kind: "k",
id: "X",
},
remove: true,
changed: false,
}, {
name: "remove last item",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a"},
}},
},
item: starItem{
group: "g",
kind: "k",
id: "a",
},
remove: true,
changed: true,
expect: preferences.StarsSpec{},
}, {
name: "remove last item (with others)",
spec: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g",
Kind: "k",
Names: []string{"a"},
}, {
Group: "g2",
Kind: "k2",
Names: []string{"a"},
}}},
item: starItem{
group: "g",
kind: "k",
id: "a",
},
remove: true,
changed: true,
expect: preferences.StarsSpec{
Resource: []preferences.StarsResource{{
Group: "g2",
Kind: "k2",
Names: []string{"a"},
}}},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
changed := apply(&tt.spec, tt.item, tt.remove)
require.Equal(t, tt.changed, changed)
if changed {
require.Equal(t, tt.expect, tt.spec)
}
})
}
})
t.Run("path", func(t *testing.T) { t.Run("path", func(t *testing.T) {
tests := []struct { tests := []struct {
name string name string

View File

@ -0,0 +1,20 @@
package server
import (
"github.com/grafana/grafana/pkg/modules"
)
// ModuleRegisterer is used to inject enterprise dskit modules into
// the module manager. This abstraction allows other builds (e.g. enterprise) to register
// additional modules while keeping the core server decoupled from build-specific dependencies.
type ModuleRegisterer interface {
RegisterModules(manager modules.Registry)
}
type noopModuleRegisterer struct{}
func (noopModuleRegisterer) RegisterModules(manager modules.Registry) {}
func ProvideNoopModuleRegisterer() ModuleRegisterer {
return &noopModuleRegisterer{}
}

View File

@ -44,8 +44,9 @@ func NewModule(opts Options,
promGatherer prometheus.Gatherer, promGatherer prometheus.Gatherer,
tracer tracing.Tracer, // Ensures tracing is initialized tracer tracing.Tracer, // Ensures tracing is initialized
license licensing.Licensing, license licensing.Licensing,
moduleRegisterer ModuleRegisterer,
) (*ModuleServer, error) { ) (*ModuleServer, error) {
s, err := newModuleServer(opts, apiOpts, features, cfg, storageMetrics, indexMetrics, reg, promGatherer, license) s, err := newModuleServer(opts, apiOpts, features, cfg, storageMetrics, indexMetrics, reg, promGatherer, license, moduleRegisterer)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -66,6 +67,7 @@ func newModuleServer(opts Options,
reg prometheus.Registerer, reg prometheus.Registerer,
promGatherer prometheus.Gatherer, promGatherer prometheus.Gatherer,
license licensing.Licensing, license licensing.Licensing,
moduleRegisterer ModuleRegisterer,
) (*ModuleServer, error) { ) (*ModuleServer, error) {
rootCtx, shutdownFn := context.WithCancel(context.Background()) rootCtx, shutdownFn := context.WithCancel(context.Background())
@ -87,6 +89,7 @@ func newModuleServer(opts Options,
promGatherer: promGatherer, promGatherer: promGatherer,
registerer: reg, registerer: reg,
license: license, license: license,
moduleRegisterer: moduleRegisterer,
} }
return s, nil return s, nil
@ -124,6 +127,9 @@ type ModuleServer struct {
httpServerRouter *mux.Router httpServerRouter *mux.Router
searchServerRing *ring.Ring searchServerRing *ring.Ring
searchServerRingClientPool *ringclient.Pool searchServerRingClientPool *ringclient.Pool
// moduleRegisterer allows registration of modules provided by other builds (e.g. enterprise).
moduleRegisterer ModuleRegisterer
} }
// init initializes the server and its services. // init initializes the server and its services.
@ -202,6 +208,9 @@ func (s *ModuleServer) Run() error {
m.RegisterModule(modules.All, nil) m.RegisterModule(modules.All, nil)
// Register modules provided by other builds (e.g. enterprise).
s.moduleRegisterer.RegisterModules(m)
return m.Run(s.context) return m.Run(s.context)
} }

View File

@ -326,7 +326,7 @@ func initModuleServerForTest(
) testModuleServer { ) testModuleServer {
tracer := tracing.InitializeTracerForTest() tracer := tracing.InitializeTracerForTest()
ms, err := NewModule(opts, apiOpts, featuremgmt.WithFeatures(featuremgmt.FlagUnifiedStorageSearch), cfg, nil, nil, prometheus.NewRegistry(), prometheus.DefaultGatherer, tracer, nil) ms, err := NewModule(opts, apiOpts, featuremgmt.WithFeatures(featuremgmt.FlagUnifiedStorageSearch), cfg, nil, nil, prometheus.NewRegistry(), prometheus.DefaultGatherer, tracer, nil, ProvideNoopModuleRegisterer())
require.NoError(t, err) require.NoError(t, err)
conn, err := grpc.NewClient(cfg.GRPCServer.Address, conn, err := grpc.NewClient(cfg.GRPCServer.Address,

View File

@ -585,7 +585,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
ossProvider := guardian.ProvideGuardian() ossProvider := guardian.ProvideGuardian()
cacheServiceImpl := service9.ProvideCacheService(cacheService, sqlStore, ossProvider) cacheServiceImpl := service9.ProvideCacheService(cacheService, sqlStore, ossProvider)
shortURLService := shorturlimpl.ProvideService(sqlStore) shortURLService := shorturlimpl.ProvideService(sqlStore)
queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl) queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl, featureToggles, eventualRestConfigProvider)
dashboardService := service7.ProvideDashboardService(featureToggles, dashboardServiceImpl) dashboardService := service7.ProvideDashboardService(featureToggles, dashboardServiceImpl)
dashverService := dashverimpl.ProvideService(cfg, sqlStore, dashboardService, featureToggles, k8sHandlerWithFallback) dashverService := dashverimpl.ProvideService(cfg, sqlStore, dashboardService, featureToggles, k8sHandlerWithFallback)
dashboardSnapshotStore := database5.ProvideStore(sqlStore, cfg) dashboardSnapshotStore := database5.ProvideStore(sqlStore, cfg)
@ -1182,7 +1182,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
return nil, err return nil, err
} }
shortURLService := shorturlimpl.ProvideService(sqlStore) shortURLService := shorturlimpl.ProvideService(sqlStore)
queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl) queryHistoryService := queryhistory.ProvideService(cfg, sqlStore, routeRegisterImpl, accessControl, featureToggles, eventualRestConfigProvider)
dashboardService := service7.ProvideDashboardService(featureToggles, dashboardServiceImpl) dashboardService := service7.ProvideDashboardService(featureToggles, dashboardServiceImpl)
dashverService := dashverimpl.ProvideService(cfg, sqlStore, dashboardService, featureToggles, k8sHandlerWithFallback) dashverService := dashverimpl.ProvideService(cfg, sqlStore, dashboardService, featureToggles, k8sHandlerWithFallback)
dashboardSnapshotStore := database5.ProvideStore(sqlStore, cfg) dashboardSnapshotStore := database5.ProvideStore(sqlStore, cfg)
@ -1621,7 +1621,8 @@ func InitializeModuleServer(cfg *setting.Cfg, opts Options, apiOpts api.ServerOp
} }
hooksService := hooks.ProvideService() hooksService := hooks.ProvideService()
ossLicensingService := licensing.ProvideService(cfg, hooksService) ossLicensingService := licensing.ProvideService(cfg, hooksService)
moduleServer, err := NewModule(opts, apiOpts, featureToggles, cfg, storageMetrics, bleveIndexMetrics, registerer, gatherer, tracingService, ossLicensingService) moduleRegisterer := ProvideNoopModuleRegisterer()
moduleServer, err := NewModule(opts, apiOpts, featureToggles, cfg, storageMetrics, bleveIndexMetrics, registerer, gatherer, tracingService, ossLicensingService, moduleRegisterer)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -191,6 +191,8 @@ var wireExtsModuleServerSet = wire.NewSet(
// Unified storage // Unified storage
resource.ProvideStorageMetrics, resource.ProvideStorageMetrics,
resource.ProvideIndexMetrics, resource.ProvideIndexMetrics,
// Overriden by enterprise
ProvideNoopModuleRegisterer,
) )
var wireExtsStandaloneAPIServerSet = wire.NewSet( var wireExtsStandaloneAPIServerSet = wire.NewSet(

View File

@ -56,6 +56,8 @@ func (s *store) Get(ctx context.Context, ID int64) (*auth.ExternalSession, error
return externalSession, nil return externalSession, nil
} }
// List returns a list of external sessions that match the given query.
// If the result set contains more than one entry, the entries are sorted by ID in descending order.
func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery) ([]*auth.ExternalSession, error) { func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery) ([]*auth.ExternalSession, error) {
ctx, span := s.tracer.Start(ctx, "externalsession.List") ctx, span := s.tracer.Start(ctx, "externalsession.List")
defer span.End() defer span.End()
@ -65,6 +67,10 @@ func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery)
externalSession.ID = query.ID externalSession.ID = query.ID
} }
if query.UserID != 0 {
externalSession.UserID = query.UserID
}
hash := sha256.New() hash := sha256.New()
if query.SessionID != "" { if query.SessionID != "" {
@ -80,7 +86,7 @@ func (s *store) List(ctx context.Context, query *auth.ListExternalSessionQuery)
queryResult := make([]*auth.ExternalSession, 0) queryResult := make([]*auth.ExternalSession, 0)
err := s.sqlStore.WithDbSession(ctx, func(sess *db.Session) error { err := s.sqlStore.WithDbSession(ctx, func(sess *db.Session) error {
return sess.Find(&queryResult, externalSession) return sess.Desc("id").Find(&queryResult, externalSession)
}) })
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -51,6 +51,7 @@ type UpdateExternalSessionCommand struct {
type ListExternalSessionQuery struct { type ListExternalSessionQuery struct {
ID int64 ID int64
UserID int64
NameID string NameID string
SessionID string SessionID string
} }

View File

@ -93,7 +93,11 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, id *authn.Ident
updateCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 15*time.Second) updateCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 15*time.Second)
defer cancel() defer cancel()
token, refreshErr := s.service.TryTokenRefresh(updateCtx, id, id.SessionToken) token, refreshErr := s.service.TryTokenRefresh(updateCtx, id, &oauthtoken.TokenRefreshMetadata{
ExternalSessionID: id.SessionToken.ExternalSessionId,
AuthModule: id.GetAuthenticatedBy(),
AuthID: id.GetAuthID(),
})
if refreshErr != nil { if refreshErr != nil {
if errors.Is(refreshErr, context.Canceled) { if errors.Is(refreshErr, context.Canceled) {
return nil, nil return nil, nil
@ -107,7 +111,7 @@ func (s *OAuthTokenSync) SyncOauthTokenHook(ctx context.Context, id *authn.Ident
ctxLogger.Error("Failed to refresh OAuth access token", "id", id.ID, "error", refreshErr) ctxLogger.Error("Failed to refresh OAuth access token", "id", id.ID, "error", refreshErr)
// log the user out // log the user out
if err := s.sessionService.RevokeToken(ctx, id.SessionToken, false); err != nil { if err := s.sessionService.RevokeToken(ctx, id.SessionToken, false); err != nil && !errors.Is(err, auth.ErrUserTokenNotFound) {
ctxLogger.Warn("Failed to revoke session token", "id", id.ID, "tokenId", id.SessionToken.Id, "error", err) ctxLogger.Warn("Failed to revoke session token", "id", id.ID, "tokenId", id.SessionToken.Id, "error", err)
} }

View File

@ -25,6 +25,7 @@ import (
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model" contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
"github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/login" "github.com/grafana/grafana/pkg/services/login"
"github.com/grafana/grafana/pkg/services/oauthtoken"
"github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest" "github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest"
) )
@ -77,6 +78,14 @@ func TestOAuthTokenSync_SyncOAuthTokenHook(t *testing.T) {
expectRevokeTokenCalled: false, expectRevokeTokenCalled: false,
expectToken: &login.UserAuth{OAuthExpiry: time.Now().Add(10 * time.Minute)}, expectToken: &login.UserAuth{OAuthExpiry: time.Now().Add(10 * time.Minute)},
}, },
{
desc: "should not invalidate session if token refresh fails with no refresh token",
identity: &authn.Identity{ID: "1", Type: claims.TypeUser, SessionToken: &auth.UserToken{}, AuthenticatedBy: login.AzureADAuthModule},
expectedTryRefreshErr: oauthtoken.ErrNoRefreshTokenFound,
expectTryRefreshTokenCalled: true,
expectRevokeTokenCalled: true,
expectedErr: oauthtoken.ErrNoRefreshTokenFound,
},
// TODO: address coverage of oauthtoken sync // TODO: address coverage of oauthtoken sync
} }
@ -89,7 +98,7 @@ func TestOAuthTokenSync_SyncOAuthTokenHook(t *testing.T) {
) )
service := &oauthtokentest.MockOauthTokenService{ service := &oauthtokentest.MockOauthTokenService{
TryTokenRefreshFunc: func(ctx context.Context, usr identity.Requester, _ *auth.UserToken) (*oauth2.Token, error) { TryTokenRefreshFunc: func(ctx context.Context, usr identity.Requester, _ *oauthtoken.TokenRefreshMetadata) (*oauth2.Token, error) {
tryRefreshCalled = true tryRefreshCalled = true
return nil, tt.expectedTryRefreshErr return nil, tt.expectedTryRefreshErr
}, },

View File

@ -297,7 +297,9 @@ func (c *OAuth) Logout(ctx context.Context, user identity.Requester, sessionToke
ctxLogger := c.log.FromContext(ctx).New("userID", userID) ctxLogger := c.log.FromContext(ctx).New("userID", userID)
if err := c.oauthService.InvalidateOAuthTokens(ctx, user, sessionToken); err != nil { if err := c.oauthService.InvalidateOAuthTokens(ctx, user, &oauthtoken.TokenRefreshMetadata{
ExternalSessionID: sessionToken.ExternalSessionId,
AuthModule: user.GetAuthenticatedBy()}); err != nil {
ctxLogger.Error("Failed to invalidate tokens", "error", err) ctxLogger.Error("Failed to invalidate tokens", "error", err)
} }

View File

@ -19,10 +19,12 @@ import (
"github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/login/social" "github.com/grafana/grafana/pkg/login/social"
"github.com/grafana/grafana/pkg/login/social/socialtest" "github.com/grafana/grafana/pkg/login/social/socialtest"
"github.com/grafana/grafana/pkg/models/usertoken"
"github.com/grafana/grafana/pkg/services/auth" "github.com/grafana/grafana/pkg/services/auth"
"github.com/grafana/grafana/pkg/services/authn" "github.com/grafana/grafana/pkg/services/authn"
"github.com/grafana/grafana/pkg/services/featuremgmt" "github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/login" "github.com/grafana/grafana/pkg/services/login"
"github.com/grafana/grafana/pkg/services/oauthtoken"
"github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest" "github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest"
"github.com/grafana/grafana/pkg/services/org" "github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/setting"
@ -481,7 +483,7 @@ func TestOAuth_Logout(t *testing.T) {
"id_token": "some.id.token", "id_token": "some.id.token",
}) })
}, },
InvalidateOAuthTokensFunc: func(_ context.Context, _ identity.Requester, _ *auth.UserToken) error { InvalidateOAuthTokensFunc: func(_ context.Context, _ identity.Requester, _ *oauthtoken.TokenRefreshMetadata) error {
invalidateTokenCalled = true invalidateTokenCalled = true
return nil return nil
}, },
@ -492,7 +494,7 @@ func TestOAuth_Logout(t *testing.T) {
} }
c := ProvideOAuth(authn.ClientWithPrefix("azuread"), tt.cfg, mockService, fakeSocialSvc, &setting.OSSImpl{Cfg: tt.cfg}, featuremgmt.WithFeatures(), tracing.InitializeTracerForTest()) c := ProvideOAuth(authn.ClientWithPrefix("azuread"), tt.cfg, mockService, fakeSocialSvc, &setting.OSSImpl{Cfg: tt.cfg}, featuremgmt.WithFeatures(), tracing.InitializeTracerForTest())
redirect, ok := c.Logout(context.Background(), &authn.Identity{ID: "1", Type: claims.TypeUser}, nil) redirect, ok := c.Logout(context.Background(), &authn.Identity{ID: "1", Type: claims.TypeUser}, &usertoken.UserToken{})
assert.Equal(t, tt.expectedOK, ok) assert.Equal(t, tt.expectedOK, ok)
if tt.expectedOK { if tt.expectedOK {

View File

@ -760,12 +760,10 @@ func (s *Service) listPermission(ctx context.Context, scopeMap map[string]bool,
cacheHit := false cacheHit := false
if t.HasFolderSupport() { if t.HasFolderSupport() {
var err error var err error
ok = false
if !req.Options.SkipCache { if !req.Options.SkipCache {
tree, ok = s.getCachedFolderTree(ctx, req.Namespace) tree, cacheHit = s.getCachedFolderTree(ctx, req.Namespace)
cacheHit = true
} }
if !ok { if !cacheHit {
tree, err = s.buildFolderTree(ctx, req.Namespace) tree, err = s.buildFolderTree(ctx, req.Namespace)
if err != nil { if err != nil {
ctxLogger.Error("could not build folder and dashboard tree", "error", err) ctxLogger.Error("could not build folder and dashboard tree", "error", err)

Some files were not shown because too many files have changed in this diff Show More