mirror of https://github.com/grafana/grafana.git

Alerting: Add extended definition to prometheus alert rules api (#103320)

* Alerting: Add extended definition to prometheus alert rules api

  This adds `isPaused` and `notificationSettings` to the paginated rules API
  to enable the paginated view of GMA rules.

  refactor: make alert rule status and state retrieval extensible. This lets
  us get status from other sources than the local ruler.

* update swagger spec

* add safety checks in test
parent b09d79b21c
commit 9f07e49cdd
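For orientation, here is a trimmed sketch of the extended rule entry this change exposes through the paginated rules API, built from the apimodels types touched in the diff below. This is a hypothetical illustration, not code from the commit; the values are taken from the test fixtures.

	// Hypothetical example: AlertingRule, Rule and AlertRuleNotificationSettings
	// are the apimodels definitions extended in this commit.
	rule := apimodels.AlertingRule{
		State: "inactive",
		Rule: apimodels.Rule{
			Name:      "AlwaysFiring",
			UID:       "RuleUID",
			FolderUID: "namespaceUID",
			Type:      "alerting",
			Health:    "ok",
			IsPaused:  true, // new field, serialized as "isPaused"
			// new field, serialized as "notificationSettings" when set
			NotificationSettings: &apimodels.AlertRuleNotificationSettings{
				Receiver: "test-receiver",
				GroupBy:  []string{"job"},
			},
		},
	}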
@@ -349,7 +349,7 @@ func TestRouteGetRuleStatuses(t *testing.T) {
 	t.Run("with a rule that only has one query", func(t *testing.T) {
 		fakeStore, fakeAIM, api := setupAPI(t)
-		generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withClassicConditionSingleQuery())
+		generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withClassicConditionSingleQuery(), gen.WithNoNotificationSettings(), gen.WithIsPaused(false))
 		folder := fakeStore.Folders[orgID][0]

 		r := api.RouteGetRuleStatuses(c)
@@ -390,6 +390,7 @@ func TestRouteGetRuleStatuses(t *testing.T) {
 					"__a_private_label_on_the_rule__": "a_value"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "2022-03-10T14:01:00Z",
 				"duration": 180,
@@ -411,9 +412,91 @@ func TestRouteGetRuleStatuses(t *testing.T) {
 			`, folder.Fullpath), string(r.Body()))
 		})

+		t.Run("with a rule that is paused", func(t *testing.T) {
+			fakeStore, fakeAIM, api := setupAPI(t)
+			generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withClassicConditionSingleQuery(), gen.WithNoNotificationSettings(), gen.WithIsPaused(true))
+			folder := fakeStore.Folders[orgID][0]
+
+			r := api.RouteGetRuleStatuses(c)
+			require.Equal(t, http.StatusOK, r.Status())
+			require.JSONEq(t, fmt.Sprintf(`
+			{
+				"status": "success",
+				"data": {
+					"groups": [{
+						"name": "rule-group",
+						"file": "%s",
+						"folderUid": "namespaceUID",
+						"rules": [{
+							"state": "inactive",
+							"name": "AlwaysFiring",
+							"folderUid": "namespaceUID",
+							"uid": "RuleUID",
+							"query": "vector(1)",
+							"queriedDatasourceUIDs": ["AUID"],
+							"alerts": [{
+								"labels": {
+									"job": "prometheus"
+								},
+								"annotations": {
+									"severity": "critical"
+								},
+								"state": "Normal",
+								"activeAt": "0001-01-01T00:00:00Z",
+								"value": ""
+							}],
+							"totals": {
+								"normal": 1
+							},
+							"totalsFiltered": {
+								"normal": 1
+							},
+							"labels": {
+								"__a_private_label_on_the_rule__": "a_value"
+							},
+							"health": "ok",
+							"isPaused": true,
+							"type": "alerting",
+							"lastEvaluation": "2022-03-10T14:01:00Z",
+							"duration": 180,
+							"keepFiringFor": 10,
+							"evaluationTime": 60
+						}],
+						"totals": {
+							"inactive": 1
+						},
+						"interval": 60,
+						"lastEvaluation": "2022-03-10T14:01:00Z",
+						"evaluationTime": 60
+					}],
+					"totals": {
+						"inactive": 1
+					}
+				}
+			}
+			`, folder.Fullpath), string(r.Body()))
+		})
+
+		t.Run("with a rule that has notification settings", func(t *testing.T) {
+			fakeStore, fakeAIM, api := setupAPI(t)
+			notificationSettings := ngmodels.NotificationSettings{
+				Receiver: "test-receiver",
+				GroupBy:  []string{"job"},
+			}
+			generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withClassicConditionSingleQuery(), gen.WithNotificationSettings(notificationSettings), gen.WithIsPaused(false))
+			r := api.RouteGetRuleStatuses(c)
+			require.Equal(t, http.StatusOK, r.Status())
+			var res apimodels.RuleResponse
+			require.NoError(t, json.Unmarshal(r.Body(), &res))
+			require.Len(t, res.Data.RuleGroups, 1)
+			require.Len(t, res.Data.RuleGroups[0].Rules, 1)
+			require.NotNil(t, res.Data.RuleGroups[0].Rules[0].NotificationSettings)
+			require.Equal(t, notificationSettings.Receiver, res.Data.RuleGroups[0].Rules[0].NotificationSettings.Receiver)
+		})
+
 		t.Run("with the inclusion of internal Labels", func(t *testing.T) {
 			fakeStore, fakeAIM, api := setupAPI(t)
-			generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withClassicConditionSingleQuery())
+			generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withClassicConditionSingleQuery(), gen.WithNoNotificationSettings(), gen.WithIsPaused(false))
 			folder := fakeStore.Folders[orgID][0]

 			req, err := http.NewRequest("GET", "/api/v1/rules?includeInternalLabels=true", nil)
@@ -461,6 +544,7 @@ func TestRouteGetRuleStatuses(t *testing.T) {
 					"__alert_rule_uid__": "RuleUID"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "2022-03-10T14:01:00Z",
 				"duration": 180,
@@ -484,7 +568,7 @@ func TestRouteGetRuleStatuses(t *testing.T) {

 	t.Run("with a rule that has multiple queries", func(t *testing.T) {
 		fakeStore, fakeAIM, api := setupAPI(t)
-		generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withExpressionsMultiQuery())
+		generateRuleAndInstanceWithQuery(t, orgID, fakeAIM, fakeStore, withExpressionsMultiQuery(), gen.WithNoNotificationSettings(), gen.WithIsPaused(false))
 		folder := fakeStore.Folders[orgID][0]

 		r := api.RouteGetRuleStatuses(c)
@@ -525,6 +609,7 @@ func TestRouteGetRuleStatuses(t *testing.T) {
 					"__a_private_label_on_the_rule__": "a_value"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "2022-03-10T14:01:00Z",
 				"duration": 180,
@@ -1684,11 +1769,11 @@ func setupAPI(t *testing.T) (*fakes.RuleStore, *fakeAlertInstanceManager, Promet
 	return fakeStore, fakeAIM, api
 }

-func generateRuleAndInstanceWithQuery(t *testing.T, orgID int64, fakeAIM *fakeAlertInstanceManager, fakeStore *fakes.RuleStore, query ngmodels.AlertRuleMutator) {
+func generateRuleAndInstanceWithQuery(t *testing.T, orgID int64, fakeAIM *fakeAlertInstanceManager, fakeStore *fakes.RuleStore, query ngmodels.AlertRuleMutator, additionalMutators ...ngmodels.AlertRuleMutator) {
 	t.Helper()

 	gen := ngmodels.RuleGen
-	r := gen.With(gen.WithOrgID(orgID), asFixture(), query).GenerateRef()
+	r := gen.With(append([]ngmodels.AlertRuleMutator{gen.WithOrgID(orgID), asFixture(), query}, additionalMutators...)...).GenerateRef()

 	fakeAIM.GenerateAlertInstances(orgID, r.UID, 1, func(s *state.State) *state.State {
 		s.Labels = data.Labels{
@@ -252,7 +252,7 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
 		namespaces[namespaceUID] = folder.Fullpath
 	}

-	ruleResponse = PrepareRuleGroupStatuses(srv.log, srv.manager, srv.status, srv.store, RuleGroupStatusesOptions{
+	ruleResponse = PrepareRuleGroupStatuses(srv.log, srv.store, RuleGroupStatusesOptions{
 		Ctx:   c.Req.Context(),
 		OrgID: c.OrgID,
 		Query: c.Req.Form,
@@ -260,12 +260,107 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
 		AuthorizeRuleGroup: func(rules []*ngmodels.AlertRule) (bool, error) {
 			return srv.authz.HasAccessToRuleGroup(c.Req.Context(), c.SignedInUser, rules)
 		},
-	})
+	}, RuleStatusMutatorGenerator(srv.status), RuleAlertStateMutatorGenerator(srv.manager))

 	return response.JSON(ruleResponse.HTTPStatusCode(), ruleResponse)
 }

-func PrepareRuleGroupStatuses(log log.Logger, manager state.AlertInstanceManager, status StatusReader, store ListAlertRulesStore, opts RuleGroupStatusesOptions) apimodels.RuleResponse {
+// mutator function used to attach status to the rule
+type RuleStatusMutator func(source *ngmodels.AlertRule, toMutate *apimodels.AlertingRule)
+
+// mutator function used to attach alert states to the rule and returns the totals and filtered totals
+type RuleAlertStateMutator func(source *ngmodels.AlertRule, toMutate *apimodels.AlertingRule, stateFilterSet map[eval.State]struct{}, matchers labels.Matchers, labelOptions []ngmodels.LabelOption) (total map[string]int64, filteredTotal map[string]int64)
+
+func RuleStatusMutatorGenerator(statusReader StatusReader) RuleStatusMutator {
+	return func(source *ngmodels.AlertRule, toMutate *apimodels.AlertingRule) {
+		status, ok := statusReader.Status(source.GetKey())
+		// Grafana by design return "ok" health and default other fields for unscheduled rules.
+		// This differs from Prometheus.
+		if !ok {
+			status = ngmodels.RuleStatus{
+				Health: "ok",
+			}
+		}
+		toMutate.Health = status.Health
+		toMutate.LastError = errorOrEmpty(status.LastError)
+		toMutate.LastEvaluation = status.EvaluationTimestamp
+		toMutate.EvaluationTime = status.EvaluationDuration.Seconds()
+	}
+}
+
+func RuleAlertStateMutatorGenerator(manager state.AlertInstanceManager) RuleAlertStateMutator {
+	return func(source *ngmodels.AlertRule, toMutate *apimodels.AlertingRule, stateFilterSet map[eval.State]struct{}, matchers labels.Matchers, labelOptions []ngmodels.LabelOption) (map[string]int64, map[string]int64) {
+		states := manager.GetStatesForRuleUID(source.OrgID, source.UID)
+		totals := make(map[string]int64)
+		totalsFiltered := make(map[string]int64)
+		for _, alertState := range states {
+			activeAt := alertState.StartsAt
+			valString := ""
+			if alertState.State == eval.Alerting || alertState.State == eval.Pending || alertState.State == eval.Recovering {
+				valString = FormatValues(alertState)
+			}
+			stateKey := strings.ToLower(alertState.State.String())
+			totals[stateKey] += 1
+			// Do not add error twice when execution error state is Error
+			if alertState.Error != nil && source.ExecErrState != ngmodels.ErrorErrState {
+				totals["error"] += 1
+			}
+			alert := apimodels.Alert{
+				Labels:      apimodels.LabelsFromMap(alertState.GetLabels(labelOptions...)),
+				Annotations: apimodels.LabelsFromMap(alertState.Annotations),
+
+				// TODO: or should we make this two fields? Using one field lets the
+				// frontend use the same logic for parsing text on annotations and this.
+				State:    state.FormatStateAndReason(alertState.State, alertState.StateReason),
+				ActiveAt: &activeAt,
+				Value:    valString,
+			}
+
+			// Set the state of the rule based on the state of its alerts.
+			// Only update the rule state with 'pending' or 'recovering' if the current state is 'inactive'.
+			// This prevents overwriting a higher-severity 'firing' state in the case of a rule with multiple alerts.
+			switch alertState.State {
+			case eval.Normal:
+			case eval.Pending:
+				if toMutate.State == "inactive" {
+					toMutate.State = "pending"
+				}
+			case eval.Recovering:
+				if toMutate.State == "inactive" {
+					toMutate.State = "recovering"
+				}
+			case eval.Alerting:
+				if toMutate.ActiveAt == nil || toMutate.ActiveAt.After(activeAt) {
+					toMutate.ActiveAt = &activeAt
+				}
+				toMutate.State = "firing"
+			case eval.Error:
+			case eval.NoData:
+			}
+
+			if len(stateFilterSet) > 0 {
+				if _, ok := stateFilterSet[alertState.State]; !ok {
+					continue
+				}
+			}
+
+			if !matchersMatch(matchers, alertState.Labels) {
+				continue
+			}
+
+			totalsFiltered[stateKey] += 1
+			// Do not add error twice when execution error state is Error
+			if alertState.Error != nil && source.ExecErrState != ngmodels.ErrorErrState {
+				totalsFiltered["error"] += 1
+			}
+
+			toMutate.Alerts = append(toMutate.Alerts, alert)
+		}
+		return totals, totalsFiltered
+	}
+}
+
+func PrepareRuleGroupStatuses(log log.Logger, store ListAlertRulesStore, opts RuleGroupStatusesOptions, ruleStatusMutator RuleStatusMutator, alertStateMutator RuleAlertStateMutator) apimodels.RuleResponse {
 	ruleResponse := apimodels.RuleResponse{
 		DiscoveryBase: apimodels.DiscoveryBase{
 			Status: "success",
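Since PrepareRuleGroupStatuses now receives the two mutators as parameters, a status source other than the local ruler can be plugged in. A minimal sketch of such wiring (hypothetical, not part of this diff), assuming only the signatures defined above:

	// A caller could supply its own status/state attachment, for example backed
	// by a remote ruler, instead of RuleStatusMutatorGenerator(srv.status) and
	// RuleAlertStateMutatorGenerator(srv.manager).
	remoteStatus := func(source *ngmodels.AlertRule, toMutate *apimodels.AlertingRule) {
		toMutate.Health = "ok" // e.g. filled from a remote ruler's API response
	}
	noStates := func(source *ngmodels.AlertRule, toMutate *apimodels.AlertingRule,
		stateFilterSet map[eval.State]struct{}, matchers labels.Matchers,
		labelOptions []ngmodels.LabelOption) (map[string]int64, map[string]int64) {
		return map[string]int64{}, map[string]int64{} // no local alert instances
	}
	ruleResponse := PrepareRuleGroupStatuses(srv.log, srv.store, opts, remoteStatus, noStates)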
@@ -299,16 +394,16 @@ func PrepareRuleGroupStatuses(log log.Logger, manager state.AlertInstanceManager
 		ruleResponse.ErrorType = apiv1.ErrBadData
 		return ruleResponse
 	}
-	withStates, err := getStatesFromQuery(opts.Query)
+	stateFilter, err := getStatesFromQuery(opts.Query)
 	if err != nil {
 		ruleResponse.Status = "error"
 		ruleResponse.Error = err.Error()
 		ruleResponse.ErrorType = apiv1.ErrBadData
 		return ruleResponse
 	}
-	withStatesFast := make(map[eval.State]struct{})
-	for _, state := range withStates {
-		withStatesFast[state] = struct{}{}
+	stateFilterSet := make(map[eval.State]struct{})
+	for _, state := range stateFilter {
+		stateFilterSet[state] = struct{}{}
 	}

 	var labelOptions []ngmodels.LabelOption
@@ -392,14 +487,14 @@ func PrepareRuleGroupStatuses(log log.Logger, manager state.AlertInstanceManager
 			break
 		}

-		ruleGroup, totals := toRuleGroup(log, manager, status, rg.GroupKey, rg.Folder, rg.Rules, limitAlertsPerRule, withStatesFast, matchers, labelOptions)
+		ruleGroup, totals := toRuleGroup(log, rg.GroupKey, rg.Folder, rg.Rules, limitAlertsPerRule, stateFilterSet, matchers, labelOptions, ruleStatusMutator, alertStateMutator)
 		ruleGroup.Totals = totals
 		for k, v := range totals {
 			rulesTotals[k] += v
 		}

-		if len(withStates) > 0 {
-			filterRules(ruleGroup, withStatesFast)
+		if len(stateFilter) > 0 {
+			filterRules(ruleGroup, stateFilterSet)
 		}

 		if limitRulesPerGroup > -1 && int64(len(ruleGroup.Rules)) > limitRulesPerGroup {
@@ -524,7 +619,7 @@ func matchersMatch(matchers []*labels.Matcher, labels map[string]string) bool {
 	return true
 }

-func toRuleGroup(log log.Logger, manager state.AlertInstanceManager, sr StatusReader, groupKey ngmodels.AlertRuleGroupKey, folderFullPath string, rules []*ngmodels.AlertRule, limitAlerts int64, withStates map[eval.State]struct{}, matchers labels.Matchers, labelOptions []ngmodels.LabelOption) (*apimodels.RuleGroup, map[string]int64) {
+func toRuleGroup(log log.Logger, groupKey ngmodels.AlertRuleGroupKey, folderFullPath string, rules []*ngmodels.AlertRule, limitAlerts int64, stateFilterSet map[eval.State]struct{}, matchers labels.Matchers, labelOptions []ngmodels.LabelOption, ruleStatusMutator RuleStatusMutator, ruleAlertStateMutator RuleAlertStateMutator) (*apimodels.RuleGroup, map[string]int64) {
 	newGroup := &apimodels.RuleGroup{
 		Name: groupKey.RuleGroup,
 		// file is what Prometheus uses for provisioning, we replace it with namespace which is the folder in Grafana.
@@ -536,112 +631,39 @@ func toRuleGroup(log log.Logger, manager state.AlertInstanceManager, sr StatusRe

 	ngmodels.RulesGroup(rules).SortByGroupIndex()
 	for _, rule := range rules {
-		status, ok := sr.Status(rule.GetKey())
-		// Grafana by design return "ok" health and default other fields for unscheduled rules.
-		// This differs from Prometheus.
-		if !ok {
-			status = ngmodels.RuleStatus{
-				Health: "ok",
-			}
-		}
-
-		queriedDatasourceUIDs := extractDatasourceUIDs(rule)
-
 		alertingRule := apimodels.AlertingRule{
 			State: "inactive",
 			Name:  rule.Title,
 			Query: ruleToQuery(log, rule),
-			QueriedDatasourceUIDs: queriedDatasourceUIDs,
+			QueriedDatasourceUIDs: extractDatasourceUIDs(rule),
 			Duration:      rule.For.Seconds(),
 			KeepFiringFor: rule.KeepFiringFor.Seconds(),
 			Annotations:   apimodels.LabelsFromMap(rule.Annotations),
+			Rule: apimodels.Rule{
+				UID:       rule.UID,
+				Name:      rule.Title,
+				FolderUID: rule.NamespaceUID,
+				Labels:    apimodels.LabelsFromMap(rule.GetLabels(labelOptions...)),
+				Type:      rule.Type().String(),
+				IsPaused:  rule.IsPaused,
+			},
 		}

-		newRule := apimodels.Rule{
-			UID:            rule.UID,
-			Name:           rule.Title,
-			FolderUID:      rule.NamespaceUID,
-			Labels:         apimodels.LabelsFromMap(rule.GetLabels(labelOptions...)),
-			Health:         status.Health,
-			LastError:      errorOrEmpty(status.LastError),
-			Type:           rule.Type().String(),
-			LastEvaluation: status.EvaluationTimestamp,
-			EvaluationTime: status.EvaluationDuration.Seconds(),
-		}
-
-		states := manager.GetStatesForRuleUID(rule.OrgID, rule.UID)
-		totals := make(map[string]int64)
-		totalsFiltered := make(map[string]int64)
-		for _, alertState := range states {
-			activeAt := alertState.StartsAt
-			valString := ""
-			if alertState.State == eval.Alerting || alertState.State == eval.Pending || alertState.State == eval.Recovering {
-				valString = FormatValues(alertState)
-			}
-			stateKey := strings.ToLower(alertState.State.String())
-			totals[stateKey] += 1
-			// Do not add error twice when execution error state is Error
-			if alertState.Error != nil && rule.ExecErrState != ngmodels.ErrorErrState {
-				totals["error"] += 1
-			}
-			alert := apimodels.Alert{
-				Labels:      apimodels.LabelsFromMap(alertState.GetLabels(labelOptions...)),
-				Annotations: apimodels.LabelsFromMap(alertState.Annotations),
-
-				// TODO: or should we make this two fields? Using one field lets the
-				// frontend use the same logic for parsing text on annotations and this.
-				State:    state.FormatStateAndReason(alertState.State, alertState.StateReason),
-				ActiveAt: &activeAt,
-				Value:    valString,
-			}
-
-			// Set the state of the rule based on the state of its alerts.
-			// Only update the rule state with 'pending' or 'recovering' if the current state is 'inactive'.
-			// This prevents overwriting a higher-severity 'firing' state in the case of a rule with multiple alerts.
-			switch alertState.State {
-			case eval.Normal:
-			case eval.Pending:
-				if alertingRule.State == "inactive" {
-					alertingRule.State = "pending"
-				}
-			case eval.Recovering:
-				if alertingRule.State == "inactive" {
-					alertingRule.State = "recovering"
-				}
-			case eval.Alerting:
-				if alertingRule.ActiveAt == nil || alertingRule.ActiveAt.After(activeAt) {
-					alertingRule.ActiveAt = &activeAt
-				}
-				alertingRule.State = "firing"
-			case eval.Error:
-			case eval.NoData:
-			}
-
-			if len(withStates) > 0 {
-				if _, ok := withStates[alertState.State]; !ok {
-					continue
-				}
-			}
-
-			if !matchersMatch(matchers, alertState.Labels) {
-				continue
-			}
-
-			totalsFiltered[stateKey] += 1
-			// Do not add error twice when execution error state is Error
-			if alertState.Error != nil && rule.ExecErrState != ngmodels.ErrorErrState {
-				totalsFiltered["error"] += 1
-			}
-
-			alertingRule.Alerts = append(alertingRule.Alerts, alert)
-		}
+		// mutate rule to apply status fields
+		ruleStatusMutator(rule, &alertingRule)
+
+		if len(rule.NotificationSettings) > 0 {
+			alertingRule.NotificationSettings = (*apimodels.AlertRuleNotificationSettings)(&rule.NotificationSettings[0])
+		}
+
+		// mutate rule for alert states
+		totals, totalsFiltered := ruleAlertStateMutator(rule, &alertingRule, stateFilterSet, matchers, labelOptions)
 		if alertingRule.State != "" {
 			rulesTotals[alertingRule.State] += 1
 		}

-		if newRule.Health == "error" || newRule.Health == "nodata" {
-			rulesTotals[newRule.Health] += 1
+		if alertingRule.Health == "error" || alertingRule.Health == "nodata" {
+			rulesTotals[alertingRule.Health] += 1
 		}

 		alertsBy := apimodels.AlertsBy(apimodels.AlertsByImportance)
@@ -654,14 +676,13 @@ func toRuleGroup(log log.Logger, manager state.AlertInstanceManager, sr StatusRe
 			alertsBy.Sort(alertingRule.Alerts)
 		}

-		alertingRule.Rule = newRule
 		alertingRule.Totals = totals
 		alertingRule.TotalsFiltered = totalsFiltered
 		newGroup.Rules = append(newGroup.Rules, alertingRule)
 		newGroup.Interval = float64(rule.IntervalSeconds)
 		// TODO yuri. Change that when scheduler will process alerts in groups
-		newGroup.EvaluationTime = newRule.EvaluationTime
-		newGroup.LastEvaluation = newRule.LastEvaluation
+		newGroup.EvaluationTime = alertingRule.EvaluationTime
+		newGroup.LastEvaluation = alertingRule.LastEvaluation
 	}

 	return newGroup, rulesTotals
@@ -475,6 +475,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "keepFiringFor": {
           "format": "double",
           "type": "number"
@@ -492,6 +495,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "queriedDatasourceUIDs": {
           "items": {
             "type": "string"
@@ -3769,6 +3775,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "labels": {
           "$ref": "#/definitions/Labels"
         },
@@ -3782,6 +3791,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "query": {
           "type": "string"
         },
@@ -6649,4 +6661,4 @@
     }
   },
   "swagger": "2.0"
-}
+}
@@ -180,9 +180,11 @@ type Rule struct {
 	Health    string `json:"health"`
 	LastError string `json:"lastError,omitempty"`
 	// required: true
-	Type           string    `json:"type"`
-	LastEvaluation time.Time `json:"lastEvaluation"`
-	EvaluationTime float64   `json:"evaluationTime"`
+	Type                 string                         `json:"type"`
+	LastEvaluation       time.Time                      `json:"lastEvaluation"`
+	EvaluationTime       float64                        `json:"evaluationTime"`
+	IsPaused             bool                           `json:"isPaused"`
+	NotificationSettings *AlertRuleNotificationSettings `json:"notificationSettings,omitempty"`
 }

 // Alert has info for an alert.
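One consequence of the tags on the two new fields, sketched here for illustration under the assumption of the struct above: isPaused has no omitempty, so it always appears in responses, while notificationSettings is dropped when nil.

	// Illustrative only: a zero-valued rule marshals with "isPaused": false
	// and no "notificationSettings" key at all.
	b, _ := json.Marshal(Rule{Type: "alerting"})
	// => {..., "type":"alerting", "lastEvaluation":"0001-01-01T00:00:00Z",
	//     "evaluationTime":0, "isPaused":false}
	fmt.Println(string(b))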
@@ -475,6 +475,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "keepFiringFor": {
           "format": "double",
           "type": "number"
@@ -492,6 +495,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "queriedDatasourceUIDs": {
           "items": {
             "type": "string"
@@ -3769,6 +3775,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "labels": {
           "$ref": "#/definitions/Labels"
         },
@@ -3782,6 +3791,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "query": {
           "type": "string"
         },
@@ -4876,6 +4888,7 @@
         "type": "object"
       },
       "alertGroups": {
+        "description": "AlertGroups alert groups",
         "items": {
           "$ref": "#/definitions/alertGroup",
           "type": "object"
@@ -9520,4 +9533,4 @@
     }
   },
   "swagger": "2.0"
-}
+}
@@ -4583,6 +4583,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "keepFiringFor": {
           "type": "number",
           "format": "double"
@@ -4600,6 +4603,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "queriedDatasourceUIDs": {
           "type": "array",
           "items": {
@@ -7876,6 +7882,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "labels": {
           "$ref": "#/definitions/Labels"
         },
@@ -7889,6 +7898,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "query": {
           "type": "string"
         },
@@ -8976,6 +8988,7 @@
         }
       },
       "alertGroups": {
+        "description": "AlertGroups alert groups",
         "type": "array",
         "items": {
           "type": "object",
@@ -9553,4 +9566,4 @@
       "type": "basic"
     }
   }
-}
+}
@@ -260,6 +260,7 @@ func TestIntegrationPrometheusRules(t *testing.T) {
 					"label1": "val1"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -270,6 +271,7 @@ func TestIntegrationPrometheusRules(t *testing.T) {
 				"folderUid": "default",
 				"uid": "%s",
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -323,6 +325,7 @@ func TestIntegrationPrometheusRules(t *testing.T) {
 					"label1": "val1"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -333,6 +336,7 @@ func TestIntegrationPrometheusRules(t *testing.T) {
 				"folderUid": "default",
 				"uid": "%s",
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -483,6 +487,7 @@ func TestIntegrationPrometheusRulesFilterByDashboard(t *testing.T) {
 					"__panelId__": "1"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -493,6 +498,7 @@ func TestIntegrationPrometheusRulesFilterByDashboard(t *testing.T) {
 				"folderUid": "default",
 				"query": "[{\"refId\":\"A\",\"queryType\":\"\",\"relativeTimeRange\":{\"from\":18000,\"to\":10800},\"datasourceUid\":\"__expr__\",\"model\":{\"expression\":\"2 + 3 \\u003e 1\",\"intervalMs\":1000,\"maxDataPoints\":43200,\"type\":\"math\"}}]",
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -530,6 +536,7 @@ func TestIntegrationPrometheusRulesFilterByDashboard(t *testing.T) {
 					"__panelId__": "1"
 				},
 				"health": "ok",
+				"isPaused": false,
 				"type": "alerting",
 				"lastEvaluation": "0001-01-01T00:00:00Z",
 				"evaluationTime": 0
@@ -12894,6 +12894,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "keepFiringFor": {
           "type": "number",
           "format": "double"
@@ -12911,6 +12914,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "queriedDatasourceUIDs": {
           "type": "array",
           "items": {
@@ -20152,6 +20158,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "labels": {
           "$ref": "#/definitions/Labels"
         },
@@ -20165,6 +20174,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/definitions/AlertRuleNotificationSettings"
+        },
         "query": {
           "type": "string"
         },
@@ -2946,6 +2946,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "keepFiringFor": {
           "format": "double",
           "type": "number"
@@ -2963,6 +2966,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/components/schemas/AlertRuleNotificationSettings"
+        },
         "queriedDatasourceUIDs": {
           "items": {
             "type": "string"
@@ -10207,6 +10213,9 @@
         "health": {
           "type": "string"
         },
+        "isPaused": {
+          "type": "boolean"
+        },
         "labels": {
           "$ref": "#/components/schemas/Labels"
         },
@@ -10220,6 +10229,9 @@
         "name": {
           "type": "string"
         },
+        "notificationSettings": {
+          "$ref": "#/components/schemas/AlertRuleNotificationSettings"
+        },
         "query": {
           "type": "string"
         },