diff --git a/doc/10-Channels.md b/doc/10-Channels.md index 506ca637..e65ad569 100644 --- a/doc/10-Channels.md +++ b/doc/10-Channels.md @@ -256,16 +256,6 @@ or if the channel is missing required configuration values. "tags": { "host": "dummy-816", "service": "random fortune" - }, - "extra_tags": { - "hostgroup/app-mobile": "", - "hostgroup/department-dev": "", - "hostgroup/env-prod": "", - "hostgroup/location-rome": "", - "servicegroup/app-storage": "", - "servicegroup/department-ps": "", - "servicegroup/env-prod": "", - "servicegroup/location-rome": "" } }, "incident": { diff --git a/doc/20-HTTP-API.md b/doc/20-HTTP-API.md index f482fa64..b47cb517 100644 --- a/doc/20-HTTP-API.md +++ b/doc/20-HTTP-API.md @@ -19,14 +19,14 @@ The authentication is performed via HTTP Basic Authentication using the source's When upgrading a setup from an earlier version, these usernames are still valid, but can be changed in Icinga Notifications Web. Events sent to Icinga Notifications are expected to match rules that describe further event escalations. -These rules can be created in the web interface. -Next to an array of `rule_ids`, a `rules_version` must be provided to ensure that the source has no outdated state. +These rules can be configured in Icinga Notifications Web and should be designed to match the `relations` of the +submitted events. When submitting an event without the expected relations to evaluate the rules, Icinga Notifications +will reject the request with a `422 Unprocessable Entity` status code and a message describing the missing relations +when the `XX-Icinga-Reject-If-Relations-Incomplete` header is set to `true`. Otherwise, the request will be accepted +nonetheless when either there's an existing incident for the event's objects, the ongoing event causes a new incident +to be opened, or the source has at least one event rule without a configured object filter. 
-When the submitted `rules_version` is either outdated or empty, the `/process-event` endpoint returns an HTTP 412 response. -The response's body is a JSON-encoded version of the -[`RulesInfo`](https://github.com/Icinga/icinga-go-library/blob/main/notifications/source/client.go), -containing the latest `rules_version` together with all rules for this source. -After reevaluating these rules, one can resubmit the event with the updated `rules_version`. +An example request to submit an event looks like this: ``` curl -v -u 'source-2:insecureinsecure' -d '@-' 'http://localhost:5680/process-event' < 255 { - return fmt.Errorf( - "invalid event: extra tag %q is too long, at most 255 chars allowed, %d given", tag, len(tag), - ) - } - } - if e.SourceId == 0 { return fmt.Errorf("invalid event: source ID must not be empty") } @@ -120,6 +123,140 @@ func (e *Event) Sync(ctx context.Context, tx *sqlx.Tx, db *database.DB, objectId return err } +// ExtractMissingRelations determines which of the given filter columns are missing in the Relations field of this event. +// +// It evaluates the filter columns as JSONPath expressions against the Relations field and returns +// a list of filter columns that do not have any matching nodes in the Relations field and are not +// part of the CompleteRelations field. For filter columns that do have matching nodes, it caches +// the evaluated nodes for potential later use during rules evaluation. +func (e *Event) ExtractMissingRelations(filterColumns ...[]string) []string { + if e.evaluatedRelations == nil { + e.evaluatedRelations = make(map[string]jsonpath.NodeList) + } + + jpp := pool.GetJSONPathParser() + defer pool.PutJSONPathParser(jpp) + + // completePaths caches the parsed JSONPath expressions of the complete relations. 
+ completePaths := map[string]*jsonpath.Path{} + var result []string +filterColumnsLoop: + for _, columns := range filterColumns { + var missing []string + for _, filterColumn := range columns { + if _, cached := e.evaluatedRelations[filterColumn]; cached { + continue + } + // This should never panic, as the filter columns have already been validated when loading the rules. + path := jpp.MustParse(utils.PrefixWithJSONPathRootSelector(filterColumn)) + if nodes := path.Select(e.Relations); len(nodes) == 0 { + isComplete := slices.ContainsFunc(e.CompleteRelations, func(relation string) bool { + completePath, ok := completePaths[relation] + if !ok { + // If we can't parse the provided relation as a JSONPath expression, just ignore it and treat + // it as a non-matching relation (but still cache the failed parsing result to avoid trying to + // parse it again for the next filter column). + completePath, _ = jpp.Parse(utils.PrefixWithJSONPathRootSelector(relation)) + completePaths[relation] = completePath + } + return completePath != nil && strings.HasPrefix(path.String(), completePath.String()) + }) + if !isComplete { + missing = append(missing, filterColumn) + } + } else { + // Cache the evaluated nodes for this filter column for potentially later use during rules evaluation. + e.evaluatedRelations[filterColumn] = nodes + // Stop evaluating the remaining filter columns of this list, as we only need to + // find one matching column for the condition to be potentially satisfied. 
+ continue filterColumnsLoop + } + } + for _, column := range missing { + if !slices.Contains(result, column) { + result = append(result, column) + } + } + } + return result +} + +func (e *Event) EvalEqual(attrs, value any) (bool, error) { + return slices.ContainsFunc(e.retrieveValuesFor(attrs), func(v any) bool { + result, err := utils.CompareAny(v, value) + if err != nil { + return false + } + return result == 0 + }), nil +} + +func (e *Event) EvalLess(attrs, value any) (bool, error) { + return slices.ContainsFunc(e.retrieveValuesFor(attrs), func(v any) bool { + result, err := utils.CompareAny(v, value) + if err != nil { + return false + } + return result < 0 + }), nil +} + +func (e *Event) EvalLike(attrs, value any) (bool, error) { + // Wildcard matching can't be implemented with types other than string, so convert it to a string unconditionally. + rgx, err := regexp.Compile(fmt.Sprint(value)) + if err != nil { + return false, err + } + + return slices.ContainsFunc(e.retrieveValuesFor(attrs), func(v any) bool { + if _, ok := v.(map[string]any); ok { + return false + } + if _, ok := v.([]any); ok { + return false + } + return rgx.MatchString(fmt.Sprint(v)) + }), nil +} + +func (e *Event) EvalLessOrEqual(attrs, value any) (bool, error) { + return slices.ContainsFunc(e.retrieveValuesFor(attrs), func(v any) bool { + result, err := utils.CompareAny(v, value) + if err != nil { + return false + } + return result <= 0 + }), nil +} + +func (e *Event) EvalExists(attrs any) bool { return len(e.retrieveValuesFor(attrs)) > 0 } + +// retrieveValuesFor retrieves the values for the given key from the Relations field of this event. 
+func (e *Event) retrieveValuesFor(attrs any) jsonpath.NodeList { + if e.evaluatedRelations == nil { + e.evaluatedRelations = make(map[string]jsonpath.NodeList) + } + + if attrs, ok := attrs.([]string); ok { + jpp := pool.GetJSONPathParser() + defer pool.PutJSONPathParser(jpp) + + for _, attr := range attrs { + attr := fmt.Sprint(attr) + nodes, cached := e.evaluatedRelations[attr] + if !cached { + path := jpp.MustParse(utils.PrefixWithJSONPathRootSelector(attr)) + nodes = path.Select(e.Relations) + e.evaluatedRelations[attr] = nodes + } + if len(nodes) > 0 { + return nodes + } + } + } + return nil +} + // EventRow represents a single event database row and isn't an in-memory representation of an event. type EventRow struct { ID int64 `db:"id"` diff --git a/internal/event/event_test.go b/internal/event/event_test.go new file mode 100644 index 00000000..05510fbd --- /dev/null +++ b/internal/event/event_test.go @@ -0,0 +1,155 @@ +package event + +import ( + "encoding/json" + "testing" + + baseEv "github.com/icinga/icinga-go-library/notifications/event" + "github.com/icinga/icinga-notifications/internal/filter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEvent(t *testing.T) { + t.Parallel() + + t.Run("ExtractMissingRelations", func(t *testing.T) { + t.Parallel() + + ev := &Event{ + Event: baseEv.Event{ + CompleteRelations: []string{"host.vars", "host", "services"}, + Relations: map[string]any{ + "host.vars": map[string]any{ + "os": "Linux", + }, + "services": []any{ + map[string]any{ + "name": "service", + "vars": map[string]any{ + "department": "IT", + }, + }, + }, + }, + }, + } + + filterColumns := [][]string{ + {"host.vars.os", "host.vars.arch", "hostgroups[*].name_ci", "services[*].name_ci"}, + {"host.vars.department", "hostgroups[*].name_ci", "servicegroups[*].name"}, + {"services[*].name", "services[*].name_ci", "services[*].vars.department", "hostgroups[*].name"}, + {"services[*].vars.arch", 
"services[*].vars.department", "hostgroups[*].name", "hostgroups[*].name_ci"}, + } + missingRelations := ev.ExtractMissingRelations(filterColumns...) + require.Lenf(t, missingRelations, 2, "%v", missingRelations) + assert.Equal(t, "hostgroups[*].name_ci", missingRelations[0]) + assert.Equal(t, "servicegroups[*].name", missingRelations[1]) + }) + + t.Run("Filter", func(t *testing.T) { + t.Parallel() + + ev := &Event{ + Event: baseEv.Event{ + Relations: map[string]any{ + "host": map[string]any{ + "name": "test-host", + "vars": map[string]any{ + "dict": map[string]any{ + "key": "value", + "key_int": 42, + "key_float": 3.1415, + "domain": "example.com", + "key_array": []any{ + "value1", + "value2", + map[string]any{ + "dict_in_array": "dict_in_array1", + }, + }, + }, + "array": []any{ + "value1-from-array", + map[string]any{ + "dict_in_array": "dict_in_array2", + }, + map[string]any{ + "dict_in_array": "dict_in_array3", + }, + }, + }, + }, + }, + }, + } + + filterData := []struct { + Expr []byte + Expected bool + }{ + // ... 
expected positive matches + {Expr: makeJsonFilterExpr(t, []string{"host.name", "host.something"}, "=", "test-host", false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"foo.bar", "host.name"}, "=", "^test.*$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"bar.foo", "host.name"}, "=", "^.*host$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"something.wrong", "host.name"}, "=", "^test.*host$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key", "invalid["}, "=", "^value$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key"}, "!=", "^something.*$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, ">=", 42, false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, "<=", 42, false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, ">=", 5, false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, "=", 42.0, false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_float"}, "<", 3.17, false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.domain"}, ">", "foo.com", false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[0]"}, "!=", "value2", false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[1]"}, "!=", "value1", false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[2].dict_in_array"}, "=", "dict_in_array1", false), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.array[0]"}, "=", "^value1-from.*$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.array[*].dict_in_array"}, "=", "^dict_in_array.*$", true), Expected: true}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[2].dict_in_array"}, 
"=", "dict_in_array1", false), Expected: true}, + + // ... expected negative matches + {Expr: makeJsonFilterExpr(t, []string{"host.name"}, "=", "wrong-host", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.name"}, "!=", "test-host", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key"}, "=", "wrong-value", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.missing"}, "=", "foo", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.missing"}, "=", "foo", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[3]"}, "=", "value", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[2].missing"}, "=", "foo", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.array[1].dict_in_array"}, "=", "wrong-value", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, "=", 043, false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, "<=", 30, false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_int"}, "<", 5, false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_float"}, ">", 90, false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.domain"}, ">", "notifications.devlab.com", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"service.name"}, "=", "^whatever.*$", true), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.name"}, "!=", "^.*host$", true), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.name"}, "!=", "^test.*host$", true), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict.key_array[2]"}, "=", "dict_in_array1", false), Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.array"}, "=", "^value1-from.*$", true), 
Expected: false}, + {Expr: makeJsonFilterExpr(t, []string{"host.vars.dict"}, "=", "^foo.*bar$", true), Expected: false}, + } + + for _, data := range filterData { + f, err := filter.UnmarshalJSON(data.Expr) + if assert.NoErrorf(t, err, "parsing %q should not fail", data.Expr) { + matched, err := f.Eval(ev) + assert.NoErrorf(t, err, "evaluating %q should not fail", data.Expr) + assert.Equal(t, data.Expected, matched, "unexpected result for %q", data.Expr) + } + } + }) +} + +// makeJsonFilterExpr is a helper function to create a JSON filter expression for testing purposes. +func makeJsonFilterExpr(t *testing.T, jsonPath, operator, value any, isRegex bool) []byte { + data := map[string]any{"attributes": jsonPath, "op": operator} + if isRegex { + data["regex"] = value + } else { + data["value"] = value + } + dataBytes, err := json.Marshal(data) + require.NoError(t, err) + return dataBytes +} diff --git a/internal/filter/contracts.go b/internal/filter/contracts.go index 12543dd0..d8f1a6b9 100644 --- a/internal/filter/contracts.go +++ b/internal/filter/contracts.go @@ -2,11 +2,11 @@ package filter // Filterable is implemented by every filterable type. type Filterable interface { - EvalEqual(key string, value string) (bool, error) - EvalLess(key string, value string) (bool, error) - EvalLike(key string, value string) (bool, error) - EvalLessOrEqual(key string, value string) (bool, error) - EvalExists(key string) bool + EvalEqual(key, value any) (bool, error) + EvalLess(key, value any) (bool, error) + EvalLike(key, value any) (bool, error) + EvalLessOrEqual(key, value any) (bool, error) + EvalExists(key any) bool } // Filter is implemented by every filter chains and filter conditions. 
diff --git a/internal/filter/parser.go b/internal/filter/parser.go index 71dfe6b4..61081e92 100644 --- a/internal/filter/parser.go +++ b/internal/filter/parser.go @@ -257,24 +257,24 @@ func (p *Parser) createCondition(column string, operator string, value string) ( switch operator { case "=": if strings.Contains(value, "*") { - return &Condition{op: Like, column: column, value: value}, nil + return &Condition{op: Like, attrs: column, value: value}, nil } - return &Condition{op: Equal, column: column, value: value}, nil + return &Condition{op: Equal, attrs: column, value: value}, nil case "!=": if strings.Contains(value, "*") { - return &Condition{op: UnLike, column: column, value: value}, nil + return &Condition{op: UnLike, attrs: column, value: value}, nil } - return &Condition{op: UnEqual, column: column, value: value}, nil + return &Condition{op: UnEqual, attrs: column, value: value}, nil case ">": - return &Condition{op: GreaterThan, column: column, value: value}, nil + return &Condition{op: GreaterThan, attrs: column, value: value}, nil case ">=": - return &Condition{op: GreaterThanEqual, column: column, value: value}, nil + return &Condition{op: GreaterThanEqual, attrs: column, value: value}, nil case "<": - return &Condition{op: LessThan, column: column, value: value}, nil + return &Condition{op: LessThan, attrs: column, value: value}, nil case "<=": - return &Condition{op: LessThanEqual, column: column, value: value}, nil + return &Condition{op: LessThanEqual, attrs: column, value: value}, nil default: return nil, fmt.Errorf("invalid operator %s provided", operator) } diff --git a/internal/filter/parser_test.go b/internal/filter/parser_test.go index ca386f67..90859907 100644 --- a/internal/filter/parser_test.go +++ b/internal/filter/parser_test.go @@ -62,42 +62,42 @@ func TestFilter(t *testing.T) { t.Run("ParserIdentifiesAllKindOfFilters", func(t *testing.T) { rule, err := Parse("foo=bar") assert.Nil(t, err, "There should be no errors but got: %s", err) - 
expected := &Condition{op: Equal, column: "foo", value: "bar"} + expected := &Condition{op: Equal, attrs: "foo", value: "bar"} assert.Equal(t, expected, rule) rule, err = Parse("foo!=bar") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: UnEqual, column: "foo", value: "bar"} + expected = &Condition{op: UnEqual, attrs: "foo", value: "bar"} assert.Equal(t, expected, rule) rule, err = Parse("foo=bar*") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: Like, column: "foo", value: "bar*"} + expected = &Condition{op: Like, attrs: "foo", value: "bar*"} assert.Equal(t, expected, rule) rule, err = Parse("foo!=bar*") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: UnLike, column: "foo", value: "bar*"} + expected = &Condition{op: UnLike, attrs: "foo", value: "bar*"} assert.Equal(t, expected, rule) rule, err = Parse("foobar") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: GreaterThan, column: "foo", value: "bar"} + expected = &Condition{op: GreaterThan, attrs: "foo", value: "bar"} assert.Equal(t, expected, rule) rule, err = Parse("foo>=bar") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: GreaterThanEqual, column: "foo", value: "bar"} + expected = &Condition{op: GreaterThanEqual, attrs: "foo", value: "bar"} assert.Equal(t, expected, rule) rule, err = Parse("foo=bar&bar=foo") @@ -125,12 +125,12 @@ func TestFilter(t *testing.T) { expectedChain := &Chain{op: All, rules: []Filter{ &Chain{op: None, rules: []Filter{ - &Condition{op: Equal, column: "foo", value: "bar"}, - &Condition{op: Equal, column: "bar", value: "foo"}, + &Condition{op: Equal, attrs: "foo", value: "bar"}, + &Condition{op: Equal, attrs: "bar", value: "foo"}, }}, &Chain{op: Any, rules: []Filter{ - &Condition{op: Equal, column: "foo", value: "bar"}, - &Condition{op: Equal, column: "bar", value: 
"foo"}, + &Condition{op: Equal, attrs: "foo", value: "bar"}, + &Condition{op: Equal, attrs: "bar", value: "foo"}, }}, }} assert.Equal(t, expectedChain, rule) @@ -140,34 +140,34 @@ func TestFilter(t *testing.T) { rule, err := Parse("foo=bar") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected := &Condition{op: Equal, column: "foo", value: "bar"} + expected := &Condition{op: Equal, attrs: "foo", value: "bar"} assert.Equal(t, expected, rule, "Parser does not parse single condition correctly") }) t.Run("UrlEncodedFilterExpression", func(t *testing.T) { rule, err := Parse("col%3Cumnval%28ue") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: GreaterThan, column: "col(umn", value: "val(ue"} + expected = &Condition{op: GreaterThan, attrs: "col(umn", value: "val(ue"} assert.Equal(t, expected, rule) rule, err = Parse("col%29umn>=val%29ue") assert.Nil(t, err, "There should be no errors but got: %s", err) - expected = &Condition{op: GreaterThanEqual, column: "col)umn", value: "val)ue"} + expected = &Condition{op: GreaterThanEqual, attrs: "col)umn", value: "val)ue"} assert.Equal(t, expected, rule) }) } diff --git a/internal/filter/types.go b/internal/filter/types.go index 4c1104d0..e88703b8 100644 --- a/internal/filter/types.go +++ b/internal/filter/types.go @@ -1,7 +1,14 @@ package filter import ( + "encoding/json" + "errors" "fmt" + "slices" + + "github.com/icinga/icinga-go-library/types" + "github.com/icinga/icinga-notifications/internal/pool" + "github.com/icinga/icinga-notifications/internal/utils" ) // LogicalOp is a type used for grouping the logical operators of a filter string. @@ -100,70 +107,70 @@ const ( // All it's fields are read-only and aren't supposed to change at runtime. For read access, you can // check the available exported methods. 
type Condition struct { - op CompOperator - column string - value string + op CompOperator + attrs any + value any } // Eval evaluates this Condition based on its operator. // Returns true when the filter evaluates to true false otherwise. func (c *Condition) Eval(filterable Filterable) (bool, error) { - if !filterable.EvalExists(c.column) { + if !filterable.EvalExists(c.attrs) { return false, nil } switch c.op { case Equal: - match, err := filterable.EvalEqual(c.column, c.value) + match, err := filterable.EvalEqual(c.attrs, c.value) if err != nil { return false, err } return match, nil case UnEqual: - match, err := filterable.EvalEqual(c.column, c.value) + match, err := filterable.EvalEqual(c.attrs, c.value) if err != nil { return false, err } return !match, nil case Like: - match, err := filterable.EvalLike(c.column, c.value) + match, err := filterable.EvalLike(c.attrs, c.value) if err != nil { return false, err } return match, nil case UnLike: - match, err := filterable.EvalLike(c.column, c.value) + match, err := filterable.EvalLike(c.attrs, c.value) if err != nil { return false, err } return !match, nil case LessThan: - match, err := filterable.EvalLess(c.column, c.value) + match, err := filterable.EvalLess(c.attrs, c.value) if err != nil { return false, err } return match, nil case LessThanEqual: - match, err := filterable.EvalLessOrEqual(c.column, c.value) + match, err := filterable.EvalLessOrEqual(c.attrs, c.value) if err != nil { return false, err } return match, nil case GreaterThan: - match, err := filterable.EvalLessOrEqual(c.column, c.value) + match, err := filterable.EvalLessOrEqual(c.attrs, c.value) if err != nil { return false, err } return !match, nil case GreaterThanEqual: - match, err := filterable.EvalLess(c.column, c.value) + match, err := filterable.EvalLess(c.attrs, c.value) if err != nil { return false, err } @@ -178,13 +185,11 @@ func (c *Condition) ExtractConditions() []*Condition { return []*Condition{c} } -// Column returns the column of 
this Condition. -func (c *Condition) Column() string { - return c.column -} +// Attributes returns the list of attributes this condition refers to. +func (c *Condition) Attributes() any { return c.attrs } // Value returns the value of this Condition. -func (c *Condition) Value() string { +func (c *Condition) Value() any { return c.value } @@ -209,3 +214,130 @@ var ( _ Filter = (*Exists)(nil) _ Filter = (*Condition)(nil) ) + +// UnmarshalJSON is a helper function to unmarshal a JSON representation of a filter into a [Filter] interface. +// +// It recursively parses the JSON data to deduce the filter type ([Chain] or [Condition]) based on the `op` field +// and constructs the appropriate filter structure. +// +// Returns nil if JSON null value is provided, and an error if the JSON is invalid or if required fields are missing. +func UnmarshalJSON(data []byte) (Filter, error) { + if string(data) == "null" { + return nil, nil + } + + message := map[string]json.RawMessage{} + if err := types.UnmarshalJSON(data, &message); err != nil { + return nil, err + } + + opBytes, opExists := message["op"] + if !opExists { + return nil, fmt.Errorf("missing required field: op") + } + + var op string + if err := types.UnmarshalJSON(opBytes, &op); err != nil { + return nil, err + } + + if isLogicalOp(op) { + rulesBytes, exists := message["rules"] + if !exists { + return nil, fmt.Errorf("missing required field: rules") + } + + var rules []json.RawMessage + if err := json.Unmarshal(rulesBytes, &rules); err != nil { + return nil, err + } + chain := &Chain{op: LogicalOp(op)} + for _, rawRule := range rules { + filter, err := UnmarshalJSON(rawRule) + if err != nil { + return nil, err + } + chain.rules = append(chain.rules, filter) + } + return chain, nil + } + + if isCompOperator(op) { + condition := &Condition{op: CompOperator(op)} + var attrs []string + if attrsBytes, exists := message["attributes"]; !exists { + return nil, fmt.Errorf("missing required filter condition field: attributes") 
+ } else if err := types.UnmarshalJSON(attrsBytes, &attrs); err != nil { + return nil, err + } + + var value any // The JSON value might represent any type, so we can't directly unmarshal it into a string. + if rawValue, exists := message["value"]; !exists { + if rawRegex, exists := message["regex"]; !exists { + return nil, fmt.Errorf("missing required filter condition field: value or regex") + } else if err := types.UnmarshalJSON(rawRegex, &value); err != nil { + return nil, err + } + switch condition.op { + case Equal: + condition.op = Like + case UnEqual: + condition.op = UnLike + default: + return nil, fmt.Errorf("regex field is only supported for equality operators (= and !=), but got operator %q", condition.op) + } + } else if err := types.UnmarshalJSON(rawValue, &value); err != nil { + return nil, err + } + condition.value = value + + jpp := pool.GetJSONPathParser() + defer pool.PutJSONPathParser(jpp) + + var errs []error + var invalidAttrs []string + for _, attr := range attrs { + if _, err := jpp.Parse(utils.PrefixWithJSONPathRootSelector(attr)); err != nil { + errs = append(errs, fmt.Errorf("invalid JSONPath expression %q: %w", attr, err)) + invalidAttrs = append(invalidAttrs, attr) + } + } + + if len(errs) > 0 { + for _, path := range invalidAttrs { + attrs = slices.DeleteFunc(attrs, func(p string) bool { return p == path }) + } + // If all provided attrs are invalid, we shouldn't load this rule at all, so bail out with an error. + if len(attrs) == 0 { + return nil, errors.Join(errors.New("all provided JSONPath expressions are invalid"), errors.Join(errs...)) + } + // Otherwise, we have already removed all invalid attrs from the condition but there are still some valid + // attrs left, so we can still load this rule. Logging the errors of the invalid attrs would be preferred + // here instead of dropping them silently, we don't have access to a logger at this point though. 
+ } + condition.attrs = attrs + + return condition, nil + } + return nil, fmt.Errorf("unknown filter operator: %s", op) +} + +// isLogicalOp checks if the provided operator is a valid logical operator. +func isLogicalOp(op string) bool { + switch LogicalOp(op) { + case All, Any, None: + return true + default: + return false + } +} + +// isCompOperator checks if the provided operator is a valid comparison operator. +func isCompOperator(op string) bool { + switch CompOperator(op) { + case Equal, UnEqual, GreaterThan, LessThan, GreaterThanEqual, LessThanEqual: + return true + default: + return false + } +} diff --git a/internal/filter/types_test.go b/internal/filter/types_test.go new file mode 100644 index 00000000..784bf22f --- /dev/null +++ b/internal/filter/types_test.go @@ -0,0 +1,212 @@ +package filter + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUnmarshalJSON(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + json string + expectErr bool + errString string + verify func(t *testing.T, f Filter) + }{ + { + name: "Null JSON", + json: "null", + expectErr: false, + verify: func(t *testing.T, f Filter) { assert.Nil(t, f) }, + }, + { + name: "Filter Condition", + json: `{"op":"=","attributes":["foo"],"value":"bar"}`, + expectErr: false, + verify: func(t *testing.T, f Filter) { + c, ok := f.(*Condition) + require.Truef(t, ok, "expected Condition, got %T", f) + assert.Equal(t, Equal, c.op) + assert.Len(t, c.Attributes(), 1) + assert.Contains(t, c.Attributes(), "foo") + assert.Equal(t, "bar", c.Value()) + }, + }, + { + name: "Simple Filter Chain", + json: `{ + "op": "&", + "rules": [ + { + "op": "=", + "attributes": ["hostgroups[*].name"], + "regex": "^.*lin.*$" + }, + { + "op": "=", + "attributes": ["host.user.name"], + "value": "icingaadmin" + }, + { + "op": "!=", + "attributes": ["host.vars['http_vhosts']['http']['http_uri']"], + "value": "\/" + } + ] +}`, + expectErr: false, 
+ verify: func(t *testing.T, f Filter) { + ch, ok := f.(*Chain) + require.Truef(t, ok, "expected Chain, got %T", f) + assert.Equal(t, All, ch.op) + assert.Len(t, ch.rules, 3) + + for _, condition := range f.ExtractConditions() { + switch condition.op { + case Like: + assert.Len(t, condition.Attributes(), 1) + assert.Contains(t, condition.Attributes(), "hostgroups[*].name") + assert.Equal(t, "^.*lin.*$", condition.Value()) + case Equal: + assert.Equal(t, Equal, condition.op) + assert.Len(t, condition.Attributes(), 1) + assert.Contains(t, condition.Attributes(), "host.user.name") + assert.Equal(t, "icingaadmin", condition.Value()) + default: + assert.Equal(t, UnEqual, condition.op) + assert.Len(t, condition.Attributes(), 1) + assert.Contains(t, condition.Attributes(), "host.vars['http_vhosts']['http']['http_uri']") + assert.Equal(t, "/", condition.Value()) + } + } + }, + }, + { + name: "Nested Filter Chain", + json: `{"op":"&","rules":[{"op":"=","attributes":["x"],"regex":"^v.*$"},{"op":"|","rules":[{"op":"!=","attributes":["y"],"regex":"^some.*$"},{"op":"!=","attributes":["z"],"value":2}]}]}`, + expectErr: false, + verify: func(t *testing.T, f Filter) { + ch, ok := f.(*Chain) + require.Truef(t, ok, "expected Chain, got %T", f) + assert.Equal(t, All, ch.op) + assert.Len(t, ch.rules, 2) + + condition, ok := ch.rules[0].(*Condition) + require.Truef(t, ok, "expected Condition, got %T", ch.rules[0]) + assert.Equal(t, Like, condition.op) + assert.Len(t, condition.Attributes(), 1) + assert.Contains(t, condition.Attributes(), "x") + assert.Equal(t, "^v.*$", condition.value) + + ch2, ok := ch.rules[1].(*Chain) + require.Truef(t, ok, "expected Condition, got %T", ch.rules[1]) + assert.Equal(t, Any, ch2.op) + assert.Len(t, ch2.rules, 2) + + for _, cond := range ch2.ExtractConditions() { + if cond.op == UnLike { + assert.Len(t, cond.Attributes(), 1) + assert.Contains(t, cond.Attributes(), "y") + assert.Equal(t, "^some.*$", cond.Value()) + } else { + assert.Equal(t, UnEqual, 
cond.op) + require.Len(t, cond.Attributes(), 1) + assert.Contains(t, cond.Attributes(), "z") + assert.Equal(t, 2.0, cond.Value()) + } + } + }, + }, + { + name: "Invalid Attributes", + json: `{"op":"=","attributes":["invalid[", "lol]", "do.something"],"value":1}`, + verify: func(t *testing.T, f Filter) { + ch, ok := f.(*Condition) + require.Truef(t, ok, "expected Condition, got %T", f) + assert.Equal(t, Equal, ch.op) + assert.Len(t, ch.Attributes(), 1) + assert.Contains(t, ch.Attributes(), "do.something") + assert.Equal(t, 1.0, ch.Value()) + }, + }, + { + name: "Regex With Invalid Operator", + json: `{"op":"<","attributes":["x"],"regex":"^v.*$"}`, + expectErr: true, + errString: "regex field is only supported for equality operators (= and !=), but got operator", + }, + { + name: "Missing Value and Regex", + json: `{"op":"=","attributes":["x"]}`, + expectErr: true, + errString: "missing required filter condition field: value or regex", + }, + { + name: "Missing Operator", + json: `{"attributes":["a"],"value":"1"}`, + expectErr: true, + errString: "missing required field: op", + }, + { + name: "Unknown Operator", + json: `{"op":"?","attributes":["a"],"value":"1"}`, + expectErr: true, + errString: "unknown filter operator", + }, + { + name: "Missing Chain Rules", + json: `{"op":"&"}`, + expectErr: true, + errString: "missing required field: rules", + }, + { + name: "Missing Filter Paths", + json: `{"op":"=","value":"1"}`, + expectErr: true, + errString: "missing required filter condition field: attributes", + }, + { + name: "Rules Not an Array", + json: `{"op":"!","rules":"notarray"}`, + expectErr: true, + // error message from json.Unmarshal when trying to unmarshal a string into a []json.RawMessage + errString: "cannot unmarshal string into Go value of type", + }, + { + name: "Invalid JSON", + json: `not a json`, + expectErr: true, + errString: "invalid character", + }, + { + name: "Invalid JSONPath", + json: `{"op":"=","attributes":["invalid[", 
"lol]"],"value":"1"}`, + expectErr: true, + errString: "unexpected eof", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + f, err := UnmarshalJSON([]byte(tc.json)) + if tc.expectErr { + assert.Errorf(t, err, "expected error but got nil; filter=%#v", f) + assert.Nil(t, f) + if tc.errString != "" { + assert.ErrorContainsf(t, err, tc.errString, "error mismatch: want contains %q, got %q", tc.errString, err.Error()) + } + return + } + require.NoErrorf(t, err, "unexpected error: %v", err) + if tc.verify != nil { + tc.verify(t, f) + } + }) + } +} diff --git a/internal/incident/incident.go b/internal/incident/incident.go index d674ba9b..1fc05809 100644 --- a/internal/incident/incident.go +++ b/internal/incident/incident.go @@ -16,7 +16,6 @@ import ( "github.com/icinga/icinga-notifications/internal/rule" "github.com/jmoiron/sqlx" "go.uber.org/zap" - "strconv" "sync" "time" ) @@ -429,33 +428,33 @@ func (i *Incident) applyMatchingRules(ctx context.Context, tx *sqlx.Tx, ev *even i.Rules = make(map[int64]struct{}) } - for _, ruleId := range ev.RuleIds { - ruleIdInt, err := strconv.ParseInt(ruleId, 10, 64) - if err != nil { - i.logger.Errorw("Event rule is not an integer", zap.String("rule_id", ruleId), zap.Error(err)) - return fmt.Errorf("cannot convert rule id %q to an int: %w", ruleId, err) - } + src, ok := i.runtimeConfig.Sources[ev.SourceId] + if !ok { + i.logger.Warnw("Received event from unknown source, might got deleted", zap.Int64("source_id", ev.SourceId)) + return nil + } - r, ok := i.runtimeConfig.Rules[ruleIdInt] - if !ok { - i.logger.Errorw("Event refers to non-existing event rule, might got deleted", zap.Int64("rule_id", ruleIdInt)) - return fmt.Errorf("cannot apply unknown rule %d", ruleIdInt) - } + for _, id := range src.RuleIDs() { + if _, ok := i.Rules[id]; !ok { + r, ok := i.runtimeConfig.Rules[id] + if !ok { + i.logger.Errorw("BUG: source references unknown event rule", zap.Object("source", src)) + continue + } 
- if r.SourceID != ev.SourceId { - i.logger.Errorw("Rule source ID does not match event source ID", - zap.Int64("event_source_id", ev.SourceId), - zap.Int64("rule_source_id", r.SourceID), - zap.Int64("rule_id", ruleIdInt)) - return fmt.Errorf("rule %d source ID %d does not match event source %d", ruleIdInt, r.SourceID, ev.SourceId) - } + matched, err := r.Eval(ev) + if err != nil { + i.logger.Errorw("Failed to evaluate object filter", zap.Object("rule", r), zap.Error(err)) + } + + if err != nil || !matched { + continue + } - if _, ok := i.Rules[r.ID]; !ok { i.Rules[r.ID] = struct{}{} i.logger.Infow("Rule matches", zap.Object("rule", r)) - err := i.AddRuleMatched(ctx, tx, r) - if err != nil { + if err := i.AddRuleMatched(ctx, tx, r); err != nil { i.logger.Errorw("Failed to upsert incident rule", zap.Object("rule", r), zap.Error(err)) return err } diff --git a/internal/incident/incidents.go b/internal/incident/incidents.go index f6ec183d..560dcc60 100644 --- a/internal/incident/incidents.go +++ b/internal/incident/incidents.go @@ -238,14 +238,13 @@ func ProcessEvent( return fmt.Errorf("cannot sync event object: %w", err) } - createIncident := ev.Severity != baseEv.SeverityNone && ev.Severity != baseEv.SeverityOK currentIncident, err := GetCurrent( ctx, db, obj, logs.GetChildLogger("incident"), runtimeConfig, - createIncident) + CanOpenNewIncident(ev)) if err != nil { return fmt.Errorf("cannot get current incident for %q: %w", obj.DisplayName(), err) } @@ -278,3 +277,16 @@ func ProcessEvent( return currentIncident.ProcessEvent(ctx, ev) } + +// CanOpenNewIncident returns true if the given event can open a new incident if there is no active one yet. +func CanOpenNewIncident(ev *event.Event) bool { + return ev.Severity != baseEv.SeverityNone && ev.Severity != baseEv.SeverityOK +} + +// HasCurrent returns true if there is an active incident for the given object. 
+func HasCurrent(obj *object.Object) bool { + currentIncidentsMu.Lock() + defer currentIncidentsMu.Unlock() + + return currentIncidents[obj] != nil +} diff --git a/internal/incident/incidents_test.go b/internal/incident/incidents_test.go index 627141f9..cc83b1f1 100644 --- a/internal/incident/incidents_test.go +++ b/internal/incident/incidents_test.go @@ -156,10 +156,6 @@ func makeIncident(ctx context.Context, db *database.DB, t *testing.T, sourceID i "host": testutils.MakeRandomString(t), "service": testutils.MakeRandomString(t), }, - ExtraTags: map[string]string{ - "hostgroup/database-server": "", - "servicegroup/webserver": "", - }, }, } diff --git a/internal/listener/listener.go b/internal/listener/listener.go index 794d8466..a793aac6 100644 --- a/internal/listener/listener.go +++ b/internal/listener/listener.go @@ -8,16 +8,16 @@ import ( "fmt" "github.com/icinga/icinga-go-library/database" "github.com/icinga/icinga-go-library/logging" + "github.com/icinga/icinga-go-library/notifications" baseEv "github.com/icinga/icinga-go-library/notifications/event" - baseSource "github.com/icinga/icinga-go-library/notifications/source" "github.com/icinga/icinga-notifications/internal" "github.com/icinga/icinga-notifications/internal/config" "github.com/icinga/icinga-notifications/internal/daemon" "github.com/icinga/icinga-notifications/internal/event" "github.com/icinga/icinga-notifications/internal/incident" + "github.com/icinga/icinga-notifications/internal/object" "go.uber.org/zap" "net/http" - "strconv" "time" ) @@ -138,19 +138,6 @@ func (l *Listener) ProcessEvent(w http.ResponseWriter, r *http.Request) { return } - // If the client uses an outdated rules version, reject the request but also send the current rules version - // and rules for this source back to the client, so it can retry the request with the updated rules. 
- if latestRuleVersion := l.runtimeConfig.GetRulesVersionFor(src.ID); ev.RulesVersion != latestRuleVersion { - w.WriteHeader(http.StatusPreconditionFailed) - l.writeSourceRulesInfo(w, src) - - l.logger.Debugw("Abort event processing due to outdated rules version", - zap.String("current_version", latestRuleVersion), - zap.String("provided_version", ev.RulesVersion), - zap.String("source", src.Name)) - return - } - ev.CompleteURL(daemon.Config().Icingaweb2URL) ev.Time = time.Now() ev.SourceId = src.ID @@ -167,6 +154,13 @@ func (l *Listener) ProcessEvent(w http.ResponseWriter, r *http.Request) { return } + filterColumns, hasRulesWithoutFilter := l.runtimeConfig.GetRulesFilterColumnsForSource(src) + missingRelations := ev.ExtractMissingRelations(filterColumns...) + if len(missingRelations) > 0 && ShouldRejectRequestOnIncompleteRelations(r, &ev, hasRulesWithoutFilter) { + l.sendMissingAttrsError(w, &ev, missingRelations) + return + } + l.logger.Infow("Processing event", zap.String("event", ev.String())) err := incident.ProcessEvent(context.Background(), l.db, l.logs, l.runtimeConfig, &ev) if errors.Is(err, event.ErrSuperfluousStateChange) || errors.Is(err, event.ErrSuperfluousMuteUnmuteEvent) { @@ -347,35 +341,40 @@ func (l *Listener) DumpRules(w http.ResponseWriter, r *http.Request) { _ = enc.Encode(l.runtimeConfig.Rules) } -// writeSourceRulesInfo writes the rules information for a specific source to the response writer. -// -// Internally, it converts the data to [baseSource.RulesInfo], being serialized JSON-encoded. -func (l *Listener) writeSourceRulesInfo(w http.ResponseWriter, source *config.Source) { - rulesInfo := baseSource.RulesInfo{ - Version: config.NoRulesVersion, - } +// sendMissingAttrsError sends a response with status code 422 Unprocessable Entity to the client. 
+func (l *Listener) sendMissingAttrsError(w http.ResponseWriter, ev *event.Event, missingAttrs []string) { + l.logger.Infow( + "Event is missing attributes required for rule evaluation", + zap.Stringer("event", ev), + zap.Strings("missing_attributes", missingAttrs), + ) - func() { // Use a function to ensure that the RLock and RUnlock are called before writing the response. - l.runtimeConfig.RLock() - defer l.runtimeConfig.RUnlock() + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusUnprocessableEntity) - if sourceInfo, ok := l.runtimeConfig.RulesBySource[source.ID]; ok { - rulesInfo.Version = sourceInfo.Version.String() - rulesInfo.Rules = make(map[string]string) - - for _, rID := range sourceInfo.RuleIDs { - id := strconv.FormatInt(rID, 10) - filterExpr := "" - if l.runtimeConfig.Rules[rID].ObjectFilterExpr.Valid { - filterExpr = l.runtimeConfig.Rules[rID].ObjectFilterExpr.String - } + resp := map[string]any{ + "type": "attrs_negotiation", + "attributes": missingAttrs, + } - rulesInfo.Rules[id] = filterExpr - } - } - }() + if err := json.NewEncoder(w).Encode(resp); err != nil { + l.logger.Errorw("Failed to send missing attributes required for rule evaluation", zap.Error(err)) + return + } +} - enc := json.NewEncoder(w) - enc.SetIndent("", " ") - _ = enc.Encode(rulesInfo) +// ShouldRejectRequestOnIncompleteRelations determines whether a request with incomplete relations should be rejected. +// +// This function always returns true if the client explicitly requested to reject such events by setting the +// [notifications.XIcingaRejectIfRelationsIncomplete] HTTP header. Otherwise, it only returns true when the +// src doesn't have any rules without an object filter and the event doesn't cause a new incident to be opened +// and there's no active one yet for the event's source object. 
+func ShouldRejectRequestOnIncompleteRelations(r *http.Request, ev *event.Event, hasRulesWithoutFilter bool) bool { + if r.Header.Get(notifications.XIcingaRejectIfRelationsIncomplete) == "true" { + return true + } + if hasRulesWithoutFilter { + return false + } + return !incident.CanOpenNewIncident(ev) && !incident.HasCurrent(object.GetFromCache(object.ID(ev.SourceId, ev.Tags))) } diff --git a/internal/object/db_types.go b/internal/object/db_types.go index 70ad1ffb..fb048104 100644 --- a/internal/object/db_types.go +++ b/internal/object/db_types.go @@ -2,24 +2,13 @@ package object import "github.com/icinga/icinga-go-library/types" -// TagRow is a base type for IdTagRow and ExtraTagRow -type TagRow struct { +// IdTagRow represents a single database object id tag. +type IdTagRow struct { ObjectId types.Binary `db:"object_id"` Tag string `db:"tag"` Value string `db:"value"` } -// ExtraTagRow represents a single database object extra tag like `hostgroup/foo: null`. -type ExtraTagRow TagRow - -// TableName implements the contracts.TableNamer interface. -func (e *ExtraTagRow) TableName() string { - return "object_extra_tag" -} - -// IdTagRow represents a single database object id tag. -type IdTagRow TagRow - // TableName implements the contracts.TableNamer interface. func (e *IdTagRow) TableName() string { return "object_id_tag" diff --git a/internal/object/object.go b/internal/object/object.go index a4b30951..054559fb 100644 --- a/internal/object/object.go +++ b/internal/object/object.go @@ -22,8 +22,7 @@ type Object struct { URL types.String `db:"url"` MuteReason types.String `db:"mute_reason"` - Tags map[string]string `db:"-"` - ExtraTags map[string]string `db:"-"` + Tags map[string]string `db:"-"` db *database.DB } @@ -31,12 +30,11 @@ type Object struct { // New creates a new object from the given event. 
func New(db *database.DB, ev *event.Event) *Object { obj := &Object{ - SourceID: ev.SourceId, - Name: ev.Name, - db: db, - URL: types.MakeString(ev.URL, types.TransformEmptyStringToNull), - Tags: ev.Tags, - ExtraTags: ev.ExtraTags, + SourceID: ev.SourceId, + Name: ev.Name, + db: db, + URL: types.MakeString(ev.URL, types.TransformEmptyStringToNull), + Tags: ev.Tags, } if ev.Mute.Valid && ev.Mute.Bool { obj.MuteReason = types.String{NullString: sql.NullString{String: ev.MuteReason, Valid: true}} @@ -80,7 +78,6 @@ func FromEvent(ctx context.Context, db *database.DB, ev *event.Event) (*Object, } else { *newObject = *object - newObject.ExtraTags = ev.ExtraTags newObject.Name = ev.Name newObject.URL = types.MakeString(ev.URL, types.TransformEmptyStringToNull) if ev.Mute.Valid { @@ -106,25 +103,11 @@ func FromEvent(ctx context.Context, db *database.DB, ev *event.Event) (*Object, } stmt, _ = db.BuildUpsertStmt(&IdTagRow{}) - _, err = tx.NamedExecContext(ctx, stmt, mapToTagRows(newObject.ID, ev.Tags)) + _, err = tx.NamedExecContext(ctx, stmt, mapToIdTagRows(newObject.ID, ev.Tags)) if err != nil { return nil, fmt.Errorf("failed to upsert object id tags: %w", err) } - extraTag := &ExtraTagRow{ObjectId: newObject.ID} - _, err = tx.NamedExecContext(ctx, `DELETE FROM "object_extra_tag" WHERE "object_id" = :object_id`, extraTag) - if err != nil { - return nil, fmt.Errorf("failed to delete object extra tags: %w", err) - } - - if len(ev.ExtraTags) > 0 { - stmt, _ := db.BuildInsertStmt(extraTag) - _, err = tx.NamedExecContext(ctx, stmt, mapToTagRows(newObject.ID, ev.ExtraTags)) - if err != nil { - return nil, fmt.Errorf("failed to insert object extra tags: %w", err) - } - } - if err = tx.Commit(); err != nil { return nil, fmt.Errorf("cannot commit object database transaction: %w", err) } @@ -171,15 +154,6 @@ func (o *Object) String() string { _, _ = fmt.Fprintf(&b, " Source %d:\n", o.SourceID) _, _ = fmt.Fprintf(&b, " Name: %q\n", o.Name) _, _ = fmt.Fprintf(&b, " URL: %q\n", 
o.URL.String)
-	_, _ = fmt.Fprintf(&b, "  Extra Tags:\n")
-
-	for tag, value := range o.ExtraTags {
-		_, _ = fmt.Fprintf(&b, "    %q", tag)
-		if value != "" {
-			_, _ = fmt.Fprintf(&b, " = %q", value)
-		}
-		_, _ = fmt.Fprintf(&b, "\n")
-	}
 
 	return b.String()
 }
@@ -218,11 +192,11 @@ func ID(source int64, tags map[string]string) types.Binary {
 	return h.Sum(nil)
 }
 
-// mapToTagRows transforms the object (extra) tags map to a slice of TagRow struct.
-func mapToTagRows(objectId types.Binary, extraTags map[string]string) []*TagRow {
-	var tagRows []*TagRow
-	for key, val := range extraTags {
-		tagRows = append(tagRows, &TagRow{
+// mapToIdTagRows transforms the object tags map to a slice of IdTagRow structs.
+func mapToIdTagRows(objectId types.Binary, tags map[string]string) []*IdTagRow {
+	var tagRows []*IdTagRow
+	for key, val := range tags {
+		tagRows = append(tagRows, &IdTagRow{
 			ObjectId: objectId,
 			Tag:      key,
 			Value:    val,
diff --git a/internal/object/objects.go b/internal/object/objects.go
index fbfa82c0..ceb494ed 100644
--- a/internal/object/objects.go
+++ b/internal/object/objects.go
@@ -61,7 +61,6 @@ func restoreObjectsFromQuery(ctx context.Context, db *database.DB, query string,
 	err := utils.ExecAndApply[Object](ctx, db, query, args, func(o *Object) {
 		o.db = db
 		o.Tags = map[string]string{}
-		o.ExtraTags = map[string]string{}
 
 		select {
 		case objects <- o:
@@ -100,14 +99,6 @@ func restoreObjectsFromQuery(ctx context.Context, db *database.DB, query string,
 		return errors.Wrap(err, "cannot restore objects ID tags")
 	}
 
-	// Restore object extra tags matching the given object ids
-	err = utils.ForEachRow[ExtraTagRow](ctx, db, "object_id", ids, func(et *ExtraTagRow) {
-		objectsMap[et.ObjectId.String()].ExtraTags[et.Tag] = et.Value
-	})
-	if err != nil {
-		return errors.Wrap(err, "cannot restore objects extra tags")
-	}
-
 	cacheMu.Lock()
 	defer cacheMu.Unlock()
 
diff --git a/internal/object/objects_test.go b/internal/object/objects_test.go
index 335c53a0..9a325639 100644
--- 
a/internal/object/objects_test.go +++ b/internal/object/objects_test.go @@ -69,16 +69,12 @@ func TestRestoreMutedObjects(t *testing.T) { assert.Equal(t, o.URL, objFromCache.URL, "objects url should match") assert.Equal(t, o.Tags, objFromCache.Tags, "objects tags should match") - assert.Equal(t, o.ExtraTags, objFromCache.ExtraTags, "objects tags should match") } // Purge all newly created objects and their relations not mes up local database tests. _, err = db.NamedExecContext(ctx, `DELETE FROM object_id_tag WHERE object_id = :id`, o) assert.NoError(t, err, "deleting object id tags should not fail") - _, err = db.NamedExecContext(ctx, `DELETE FROM object_extra_tag WHERE object_id = :id`, o) - assert.NoError(t, err, "deleting object extra tags should not fail") - _, err = db.NamedExecContext(ctx, `DELETE FROM object WHERE id = :id`, o) assert.NoError(t, err, "deleting object should not fail") } @@ -96,10 +92,6 @@ func makeObject(ctx context.Context, db *database.DB, t *testing.T, sourceID int "host": testutils.MakeRandomString(t), "service": testutils.MakeRandomString(t), }, - ExtraTags: map[string]string{ - "hostgroup/database-server": "", - "servicegroup/webserver": "", - }, }, } diff --git a/internal/pool/jsonpath.go b/internal/pool/jsonpath.go new file mode 100644 index 00000000..dad413be --- /dev/null +++ b/internal/pool/jsonpath.go @@ -0,0 +1,34 @@ +package pool + +import ( + "sync" + + "github.com/theory/jsonpath" +) + +// jsonPathParserPool is a pool of JSONPath parsers to avoid the overhead of creating new parsers for each evaluation. +// +// JSONPath parsers are used to evaluate JSONPath expressions of event rule filters against the events. +// Since the parser doesn't cache any state specific to any given expressions, it can be safely reused +// across multiple evaluations, and thus we can use a pool to reduce the overhead of creating new parsers +// for each evaluation. 
+var jsonPathParserPool = sync.Pool{ + New: func() any { + return jsonpath.NewParser() + }, +} + +// GetJSONPathParser retrieves a JSONPath parser from the pool. +// +// The caller is responsible for returning the parser to the pool after use by calling [PutJSONPathParser]. +func GetJSONPathParser() *jsonpath.Parser { + return jsonPathParserPool.Get().(*jsonpath.Parser) //nolint:forcetypeassert +} + +// PutJSONPathParser returns a JSONPath parser to the pool. +// +// The caller should call this function after using a parser retrieved from the pool to allow +// it to be reused for future evaluations. +func PutJSONPathParser(parser *jsonpath.Parser) { + jsonPathParserPool.Put(parser) +} diff --git a/internal/rule/condition.go b/internal/rule/condition.go index 485cedff..4dc51a2a 100644 --- a/internal/rule/condition.go +++ b/internal/rule/condition.go @@ -28,8 +28,8 @@ type EscalationFilter struct { func (e *EscalationFilter) ReevaluateAfter(escalationCond filter.Filter) time.Duration { retryAfter := RetryNever for _, condition := range escalationCond.ExtractConditions() { - if condition.Column() == "incident_age" { - v, err := time.ParseDuration(condition.Value()) + if condition.Attributes() == "incident_age" { + v, err := time.ParseDuration(fmt.Sprint(condition.Value())) if err == nil && v > e.IncidentAge { // The incident age is compared with a value in the future. Once that age is // reached, the escalation could trigger, so consider that time for reevaluation. 
@@ -41,17 +41,17 @@ func (e *EscalationFilter) ReevaluateAfter(escalationCond filter.Filter) time.Du return retryAfter } -func (e *EscalationFilter) EvalEqual(key string, value string) (bool, error) { +func (e *EscalationFilter) EvalEqual(key, value any) (bool, error) { switch key { case "incident_age": - age, err := time.ParseDuration(value) + age, err := time.ParseDuration(fmt.Sprint(value)) if err != nil { return false, err } return e.IncidentAge == age, nil case "incident_severity": - severity, err := event.ParseSeverity(value) + severity, err := event.ParseSeverity(fmt.Sprint(value)) if err != nil { return false, err } @@ -62,17 +62,17 @@ func (e *EscalationFilter) EvalEqual(key string, value string) (bool, error) { } } -func (e *EscalationFilter) EvalLess(key string, value string) (bool, error) { +func (e *EscalationFilter) EvalLess(key, value any) (bool, error) { switch key { case "incident_age": - age, err := time.ParseDuration(value) + age, err := time.ParseDuration(fmt.Sprint(value)) if err != nil { return false, err } return e.IncidentAge < age, nil case "incident_severity": - severity, err := event.ParseSeverity(value) + severity, err := event.ParseSeverity(fmt.Sprint(value)) if err != nil { return false, err } @@ -83,21 +83,21 @@ func (e *EscalationFilter) EvalLess(key string, value string) (bool, error) { } } -func (e *EscalationFilter) EvalLike(key string, value string) (bool, error) { +func (e *EscalationFilter) EvalLike(_, _ any) (bool, error) { return false, fmt.Errorf("escalation filter does not support wildcard matches") } -func (e *EscalationFilter) EvalLessOrEqual(key string, value string) (bool, error) { +func (e *EscalationFilter) EvalLessOrEqual(key, value any) (bool, error) { switch key { case "incident_age": - age, err := time.ParseDuration(value) + age, err := time.ParseDuration(fmt.Sprint(value)) if err != nil { return false, err } return e.IncidentAge <= age, nil case "incident_severity": - severity, err := event.ParseSeverity(value) + 
severity, err := event.ParseSeverity(fmt.Sprint(value)) if err != nil { return false, err } @@ -108,7 +108,7 @@ func (e *EscalationFilter) EvalLessOrEqual(key string, value string) (bool, erro } } -func (e *EscalationFilter) EvalExists(key string) bool { +func (e *EscalationFilter) EvalExists(key any) bool { switch key { case "incident_age": fallthrough diff --git a/internal/rule/rule.go b/internal/rule/rule.go index a4fa1a49..5ea645cf 100644 --- a/internal/rule/rule.go +++ b/internal/rule/rule.go @@ -1,8 +1,12 @@ package rule import ( + "encoding/json" + "errors" + "github.com/icinga/icinga-go-library/types" "github.com/icinga/icinga-notifications/internal/config/baseconf" + "github.com/icinga/icinga-notifications/internal/filter" "github.com/icinga/icinga-notifications/internal/recipient" "github.com/icinga/icinga-notifications/internal/timeperiod" "go.uber.org/zap/zapcore" @@ -16,12 +20,46 @@ type Rule struct { TimePeriod *timeperiod.TimePeriod `db:"-"` TimePeriodID types.Int `db:"timeperiod_id"` SourceID int64 `db:"source_id"` + ObjectFilter filter.Filter `db:"-"` ObjectFilterExpr types.String `db:"object_filter"` Escalations map[int64]*Escalation `db:"-"` + + // FilterColumns is a set of all filter columns used in the rule's ObjectFilter. + // + // This is computed from the ObjectFilter once and can be used by sources to determine which + // columns they need to provide for the events to be able to evaluate the rule. + FilterColumns FilterAttrsType `db:"-"` } +// FilterAttrsType represents a list of filter attributes for a given list of filter conditions. +type FilterAttrsType [][]string + // IncrementalInitAndValidate implements the config.IncrementalConfigurableInitAndValidatable interface. 
func (r *Rule) IncrementalInitAndValidate() error { + if r.ObjectFilterExpr.Valid { + data := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(r.ObjectFilterExpr.String), &data); err != nil { + return err + } + filterBytes, exists := data["ast"] + if !exists { + return errors.New("missing 'ast' field in object filter expression") + } + + f, err := filter.UnmarshalJSON(filterBytes) + if err != nil { + return err + } + + r.ObjectFilter = f + if f != nil { + for _, condition := range f.ExtractConditions() { + if attrs, ok := condition.Attributes().([]string); ok { + r.FilterColumns = append(r.FilterColumns, attrs) + } + } + } + } return nil } @@ -41,6 +79,16 @@ func (r *Rule) MarshalLogObject(encoder zapcore.ObjectEncoder) error { return nil } +// Eval evaluates the configured object filter for the provided filterable. +// +// Returns always true if the current rule doesn't have a configured object filter. +func (r *Rule) Eval(filterable filter.Filterable) (bool, error) { + if r.ObjectFilter == nil { + return true, nil + } + return r.ObjectFilter.Eval(filterable) +} + // ContactChannels stores a set of channel IDs for each set of individual contacts. type ContactChannels map[*recipient.Contact]map[int64]bool diff --git a/internal/utils/utils.go b/internal/utils/utils.go index e5938cb1..c529c60b 100644 --- a/internal/utils/utils.go +++ b/internal/utils/utils.go @@ -1,11 +1,14 @@ package utils import ( + "cmp" "context" "fmt" "github.com/icinga/icinga-go-library/database" "github.com/jmoiron/sqlx" "github.com/pkg/errors" + "reflect" + "strings" ) // ExecAndApply applies the provided restoreFunc callback for each successfully retrieved row of the specified type. @@ -53,3 +56,49 @@ func ForEachRow[Row, Id any](ctx context.Context, db *database.DB, idColumn stri return ExecAndApply(ctx, db, stmt, args, restoreFunc) } + +// PrefixWithJSONPathRootSelector ensures that the provided JSONPath expression starts with the root selector "$.". 
+// +// If the provided path already starts with "$.", it is returned unchanged. +// Otherwise, the root selector is prefixed to the path. +func PrefixWithJSONPathRootSelector(path string) string { + if !strings.HasPrefix(path, "$.") { + return "$." + path + } + return path +} + +// CompareAny compares two values of any type and returns an integer indicating their order (1 if a > b, -1 if a < b, 0 if equal). +func CompareAny(a, b any) (int, error) { + atype := reflect.TypeOf(a) + btype := reflect.TypeOf(b) + + switch atype.Kind() { + case reflect.String: + if btype.ConvertibleTo(atype) { + av := fmt.Sprint(a) + bv := fmt.Sprint(b) + if len(av) > len(bv) { + return 1, nil // a is greater than b + } + if len(av) < len(bv) { + return -1, nil // a is less than b + } + // Both strings have the same length, compare them lexicographically. + return strings.Compare(av, bv), nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if btype.ConvertibleTo(atype) { + return cmp.Compare(reflect.ValueOf(a).Int(), reflect.ValueOf(b).Convert(atype).Int()), nil + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if atype.ConvertibleTo(btype) { + return cmp.Compare(reflect.ValueOf(a).Uint(), reflect.ValueOf(b).Convert(atype).Uint()), nil + } + case reflect.Float32, reflect.Float64: + if atype.ConvertibleTo(btype) { + return cmp.Compare(reflect.ValueOf(a).Float(), reflect.ValueOf(b).Convert(atype).Float()), nil + } + } + return 0, errors.Errorf("cannot compare types %s and %s", atype.Kind(), btype.Kind()) +} diff --git a/schema/mysql/schema.sql b/schema/mysql/schema.sql index 8960e81e..f2ccde7f 100644 --- a/schema/mysql/schema.sql +++ b/schema/mysql/schema.sql @@ -255,15 +255,6 @@ CREATE TABLE object_id_tag ( CONSTRAINT fk_object_id_tag_object FOREIGN KEY (object_id) REFERENCES object(id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -CREATE TABLE object_extra_tag ( - object_id binary(32) NOT NULL, - 
tag varchar(255) NOT NULL, - value text NOT NULL, - - CONSTRAINT pk_object_extra_tag PRIMARY KEY (object_id, tag), - CONSTRAINT fk_object_extra_tag_object FOREIGN KEY (object_id) REFERENCES object(id) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; - CREATE TABLE event ( id bigint NOT NULL AUTO_INCREMENT, time bigint NOT NULL, diff --git a/schema/mysql/upgrades/001.sql b/schema/mysql/upgrades/001.sql new file mode 100644 index 00000000..e7b1ddbd --- /dev/null +++ b/schema/mysql/upgrades/001.sql @@ -0,0 +1 @@ +DROP TABLE object_extra_tag; diff --git a/schema/pgsql/schema.sql b/schema/pgsql/schema.sql index 791b5702..fba441c2 100644 --- a/schema/pgsql/schema.sql +++ b/schema/pgsql/schema.sql @@ -288,15 +288,6 @@ CREATE TABLE object_id_tag ( CONSTRAINT fk_object_id_tag_object FOREIGN KEY (object_id) REFERENCES object(id) ); -CREATE TABLE object_extra_tag ( - object_id bytea NOT NULL, - tag varchar(255) NOT NULL, - value text NOT NULL, - - CONSTRAINT pk_object_extra_tag PRIMARY KEY (object_id, tag), - CONSTRAINT fk_object_extra_tag_object FOREIGN KEY (object_id) REFERENCES object(id) -); - CREATE TYPE event_type AS ENUM ( 'acknowledgement-cleared', 'acknowledgement-set', diff --git a/schema/pgsql/upgrades/001.sql b/schema/pgsql/upgrades/001.sql new file mode 100644 index 00000000..e7b1ddbd --- /dev/null +++ b/schema/pgsql/upgrades/001.sql @@ -0,0 +1 @@ +DROP TABLE object_extra_tag;