diff --git a/CHANGELOG.md b/CHANGELOG.md index 15bbe1e849e..f9d8ae0dbad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## master / unreleased +* [CHANGE] Config file changed to remove top level `config_store` field in favor of a nested `alertmanager.store.configdb` field. #1686 * [CHANGE] Removed unnecessary `frontend.cache-split-interval` in favor of `querier.split-queries-by-interval` both to reduce configuration complexity and guarantee alignment of these two configs. Starting from now, `-querier.cache-results` may only be enabled in conjunction with `-querier.split-queries-by-interval` (previously the cache interval default was `24h` so if you want to preserve the same behaviour you should set `-querier.split-queries-by-interval=24h`). #2040 * [CHANGE] Removed remaining support for using denormalised tokens in the ring. If you're still running ingesters with denormalised tokens (Cortex 0.4 or earlier, with `-ingester.normalise-tokens=false`), such ingesters will now be completely invisible to distributors and need to be either switched to Cortex 0.6.0 or later, or be configured to use normalised tokens. #2034 * [CHANGE] Moved `--store.min-chunk-age` to the Querier config as `--querier.query-store-after`, allowing the store to be skipped during query time if the metrics wouldn't be found. The YAML config option `ingestermaxquerylookback` has been renamed to `query_ingesters_within` to match its CLI flag. #1893 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 40a2b0ef221..1e09084c64d 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -86,11 +86,6 @@ Supported contents and default values of the config file: # and used by the 'configs' service to expose APIs to manage them. [configdb: ] -# The configstore_config configures the config database storing rules and -# alerts, and is used by the Cortex alertmanager. 
-# The CLI flags prefix for this block config is: alertmanager -[config_store: ] - # The alertmanager_config configures the Cortex alertmanager. [alertmanager: ] @@ -821,6 +816,22 @@ externalurl: # Root of URL to generate if config is http://internal.monitor # CLI flag: -alertmanager.configs.auto-webhook-root [autowebhookroot: | default = ""] + +store: + # Type of backend to use to store alertmanager configs. Supported values are: + # "configdb", "local". + # CLI flag: -alertmanager.storage.type + [type: | default = "configdb"] + + # The configstore_config configures the config database storing rules and + # alerts, and is used by the Cortex alertmanager. + # The CLI flags prefix for this block config is: alertmanager + [configdb: ] + + local: + # Path at which alertmanager configurations are stored. + # CLI flag: -alertmanager.storage.local.path + [path: | default = ""] ``` ## `table_manager_config` diff --git a/go.sum b/go.sum index a7c69ca9a4c..8983581d42b 100644 --- a/go.sum +++ b/go.sum @@ -71,7 +71,6 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= @@ -99,7 +98,6 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= @@ -375,7 +373,6 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= @@ -454,7 +451,6 @@ github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/serf v0.8.5 h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc= 
github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= @@ -474,7 +470,6 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -652,7 +647,6 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= @@ -1096,12 +1090,10 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi 
v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6 h1:s9IxTKe9GwDH0S/WaX62nFYr0or32DsTWex9AileL7U= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a h1:uy5HAgt4Ha5rEMbhZA+aM1j2cq5LmR6LQ71EYC2sVH4= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= diff --git a/integration/alertmanager_test.go b/integration/alertmanager_test.go new file mode 100644 index 00000000000..d67bbe972c3 --- /dev/null +++ b/integration/alertmanager_test.go @@ -0,0 +1,41 @@ +package main + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/framework" +) + +func TestAlertmanager(t *testing.T) { + s, err := framework.NewScenario() + require.NoError(t, err) + defer s.Shutdown() + + AlertmanagerConfigs := map[string]string{ + "-alertmanager.storage.local.path": "/integration/alertmanager_test_fixtures/", + "-alertmanager.storage.type": "local", + "-alertmanager.web.external-url": "http://localhost/api/prom", + } + + // Start Cortex components + require.NoError(t, s.StartAlertmanager("alertmanager", AlertmanagerConfigs, "")) + require.NoError(t, s.Service("alertmanager").WaitMetric(80, "cortex_alertmanager_configs", 1)) + + c, err := framework.NewClient("", "", 
s.Endpoint("alertmanager", 80), "user-1") + require.NoError(t, err) + + status, err := c.GetAlertmanagerStatus(context.Background()) + require.NoError(t, err) + + // Ensure the returned status config matches alertmanager_test_fixtures/user-1.yaml + require.NotNil(t, status) + require.Equal(t, "example_receiver", status.ConfigJSON.Route.Receiver) + require.Len(t, status.ConfigJSON.Route.GroupByStr, 1) + require.Equal(t, "example_groupby", status.ConfigJSON.Route.GroupByStr[0]) + require.Len(t, status.ConfigJSON.Receivers, 1) + require.Equal(t, "example_receiver", status.ConfigJSON.Receivers[0].Name) + require.NoError(t, s.StopService("alertmanager")) +} diff --git a/integration/alertmanager_test_fixtures/user-1.yaml b/integration/alertmanager_test_fixtures/user-1.yaml new file mode 100644 index 00000000000..a40c97baa8b --- /dev/null +++ b/integration/alertmanager_test_fixtures/user-1.yaml @@ -0,0 +1,5 @@ +route: + receiver: "example_receiver" + group_by: ["example_groupby"] +receivers: + - name: "example_receiver" diff --git a/integration/backward_compatibility_test.go b/integration/backward_compatibility_test.go index 52e45946395..3b45d882097 100644 --- a/integration/backward_compatibility_test.go +++ b/integration/backward_compatibility_test.go @@ -44,7 +44,7 @@ func TestBackwardCompatibilityWithChunksStorage(t *testing.T) { now := time.Now() series, expectedVector := generateSeries("series_1", now) - c, err := framework.NewClient(s.Endpoint("distributor", 80), "", "user-1") + c, err := framework.NewClient(s.Endpoint("distributor", 80), "", "", "user-1") require.NoError(t, err) res, err := c.Push(series) @@ -69,7 +69,7 @@ func TestBackwardCompatibilityWithChunksStorage(t *testing.T) { require.NoError(t, s.Service("querier").WaitMetric(80, "cortex_ring_tokens_total", 512)) // Query the series - c, err := framework.NewClient(s.Endpoint("distributor", 80), s.Endpoint("querier", 80), "user-1") + c, err := framework.NewClient(s.Endpoint("distributor", 80), 
s.Endpoint("querier", 80), "", "user-1") require.NoError(t, err) result, err := c.Query("series_1", now) diff --git a/integration/framework/client.go b/integration/framework/client.go index 4bf879d1a2e..64418566605 100644 --- a/integration/framework/client.go +++ b/integration/framework/client.go @@ -9,6 +9,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" + alertClient "github.com/prometheus/alertmanager/client" promapi "github.com/prometheus/client_golang/api" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/model" @@ -17,6 +18,7 @@ import ( // Client is a client used to interact with Cortex in integration tests type Client struct { + alertmanagerClient promapi.Client distributorAddress string timeout time.Duration httpClient *http.Client @@ -25,7 +27,7 @@ type Client struct { } // NewClient makes a new Cortex client -func NewClient(distributorAddress string, querierAddress string, orgID string) (*Client, error) { +func NewClient(distributorAddress string, querierAddress string, alertmanagerAddress string, orgID string) (*Client, error) { // Create querier API client querierAPIClient, err := promapi.NewClient(promapi.Config{ Address: "http://" + querierAddress + "/api/prom", @@ -43,6 +45,17 @@ func NewClient(distributorAddress string, querierAddress string, orgID string) ( orgID: orgID, } + if alertmanagerAddress != "" { + alertmanagerAPIClient, err := promapi.NewClient(promapi.Config{ + Address: "http://" + alertmanagerAddress + "/api/prom", + RoundTripper: &addOrgIDRoundTripper{orgID: orgID, next: http.DefaultTransport}, + }) + if err != nil { + return nil, err + } + c.alertmanagerClient = alertmanagerAPIClient + } + return c, nil } @@ -95,3 +108,9 @@ func (r *addOrgIDRoundTripper) RoundTrip(req *http.Request) (*http.Response, err return r.next.RoundTrip(req) } + +// GetAlertmanagerStatus gets the status of an alertmanager instance +func (c *Client) GetAlertmanagerStatus(ctx context.Context) 
(*alertClient.ServerStatus, error) { + statusAPI := alertClient.NewStatusAPI(c.alertmanagerClient) + return statusAPI.Get(ctx) +} diff --git a/integration/framework/scenario.go b/integration/framework/scenario.go index f27f635f4be..60a4cec1e8c 100644 --- a/integration/framework/scenario.go +++ b/integration/framework/scenario.go @@ -219,6 +219,26 @@ func (s *Scenario) StartTableManager(name string, flags map[string]string, image )) } +func (s *Scenario) StartAlertmanager(name string, flags map[string]string, image string) error { + if image == "" { + image = GetDefaultCortexImage() + } + + return s.StartService(NewService( + name, + image, + NetworkName, + []int{80}, + nil, + NewCommandWithoutEntrypoint("cortex", BuildArgs(MergeFlags(map[string]string{ + "-target": "alertmanager", + "-log.level": "warn", + }, flags))...), + // The alertmanager doesn't expose a readiness probe, so we just check if the / returns 404 + NewReadinessProbe(80, "/", 404), + )) +} + func (s *Scenario) StopService(name string) error { service := s.Service(name) if service == nil { diff --git a/integration/ingester_flush_test.go b/integration/ingester_flush_test.go index 04b93cdeaf9..d2a140e52f2 100644 --- a/integration/ingester_flush_test.go +++ b/integration/ingester_flush_test.go @@ -42,7 +42,7 @@ func TestIngesterFlushWithChunksStorage(t *testing.T) { require.NoError(t, s.Service("distributor").WaitMetric(80, "cortex_ring_tokens_total", 512)) require.NoError(t, s.Service("querier").WaitMetric(80, "cortex_ring_tokens_total", 512)) - c, err := framework.NewClient(s.Endpoint("distributor", 80), s.Endpoint("querier", 80), "user-1") + c, err := framework.NewClient(s.Endpoint("distributor", 80), s.Endpoint("querier", 80), "", "user-1") require.NoError(t, err) // Push some series to Cortex diff --git a/integration/ingester_hand_over_test.go b/integration/ingester_hand_over_test.go index 9890ae3df8b..7cda211d0d8 100644 --- a/integration/ingester_hand_over_test.go +++ 
b/integration/ingester_hand_over_test.go @@ -58,7 +58,7 @@ func runIngesterHandOverTest(t *testing.T, flags map[string]string, setup func(t require.NoError(t, s.Service("distributor").WaitMetric(80, "cortex_ring_tokens_total", 512)) require.NoError(t, s.Service("querier").WaitMetric(80, "cortex_ring_tokens_total", 512)) - c, err := framework.NewClient(s.Endpoint("distributor", 80), s.Endpoint("querier", 80), "user-1") + c, err := framework.NewClient(s.Endpoint("distributor", 80), s.Endpoint("querier", 80), "", "user-1") require.NoError(t, err) // Push some series to Cortex diff --git a/pkg/alertmanager/alertmanager.go b/pkg/alertmanager/alertmanager.go index 6371de0b3bb..4e7c8bcabd3 100644 --- a/pkg/alertmanager/alertmanager.go +++ b/pkg/alertmanager/alertmanager.go @@ -191,8 +191,15 @@ func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config) error { am.api.Update(conf, func(_ model.LabelSet) {}) - am.inhibitor.Stop() - am.dispatcher.Stop() + // Ensure dispatcher is set before being called + if am.dispatcher != nil { + am.dispatcher.Stop() + } + + // Ensure inhibitor is set before being called + if am.inhibitor != nil { + am.inhibitor.Stop() + } am.inhibitor = inhibit.NewInhibitor(am.alerts, conf.InhibitRules, am.marker, log.With(am.logger, "component", "inhibitor")) diff --git a/pkg/alertmanager/alerts/alerts.pb.go b/pkg/alertmanager/alerts/alerts.pb.go new file mode 100644 index 00000000000..016c42a4162 --- /dev/null +++ b/pkg/alertmanager/alerts/alerts.pb.go @@ -0,0 +1,801 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: alerts.proto + +package alerts + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type AlertConfigDesc struct { + User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + RawConfig string `protobuf:"bytes,2,opt,name=raw_config,json=rawConfig,proto3" json:"raw_config,omitempty"` + Templates []*TemplateDesc `protobuf:"bytes,3,rep,name=templates,proto3" json:"templates,omitempty"` +} + +func (m *AlertConfigDesc) Reset() { *m = AlertConfigDesc{} } +func (*AlertConfigDesc) ProtoMessage() {} +func (*AlertConfigDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_20493709c38b81dc, []int{0} +} +func (m *AlertConfigDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlertConfigDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AlertConfigDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AlertConfigDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertConfigDesc.Merge(m, src) +} +func (m *AlertConfigDesc) XXX_Size() int { + return m.Size() +} +func (m *AlertConfigDesc) XXX_DiscardUnknown() { + xxx_messageInfo_AlertConfigDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertConfigDesc proto.InternalMessageInfo + +func (m *AlertConfigDesc) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AlertConfigDesc) GetRawConfig() string { + if m != nil { + return m.RawConfig + } + return "" +} + +func (m *AlertConfigDesc) GetTemplates() []*TemplateDesc { + if m != nil { + return m.Templates + } + return nil +} + +type 
TemplateDesc struct { + Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` + Body string `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (m *TemplateDesc) Reset() { *m = TemplateDesc{} } +func (*TemplateDesc) ProtoMessage() {} +func (*TemplateDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_20493709c38b81dc, []int{1} +} +func (m *TemplateDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TemplateDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TemplateDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateDesc.Merge(m, src) +} +func (m *TemplateDesc) XXX_Size() int { + return m.Size() +} +func (m *TemplateDesc) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateDesc proto.InternalMessageInfo + +func (m *TemplateDesc) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *TemplateDesc) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func init() { + proto.RegisterType((*AlertConfigDesc)(nil), "alerts.AlertConfigDesc") + proto.RegisterType((*TemplateDesc)(nil), "alerts.TemplateDesc") +} + +func init() { proto.RegisterFile("alerts.proto", fileDescriptor_20493709c38b81dc) } + +var fileDescriptor_20493709c38b81dc = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xcc, 0x49, 0x2d, + 0x2a, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0, 0xa4, 0x74, 0xd3, 0x33, + 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xd2, + 0x49, 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0xb4, 
0x29, 0x55, 0x70, 0xf1, 0x3b, 0x82, + 0x34, 0x3a, 0xe7, 0xe7, 0xa5, 0x65, 0xa6, 0xbb, 0xa4, 0x16, 0x27, 0x0b, 0x09, 0x71, 0xb1, 0x94, + 0x16, 0xa7, 0x16, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0xb2, 0x5c, 0x5c, + 0x45, 0x89, 0xe5, 0xf1, 0xc9, 0x60, 0x55, 0x12, 0x4c, 0x60, 0x19, 0xce, 0xa2, 0xc4, 0x72, 0x88, + 0x36, 0x21, 0x23, 0x2e, 0xce, 0x92, 0xd4, 0xdc, 0x82, 0x9c, 0xc4, 0x92, 0xd4, 0x62, 0x09, 0x66, + 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x11, 0x3d, 0xa8, 0xf3, 0x42, 0xa0, 0x12, 0x20, 0xb3, 0x83, 0x10, + 0xca, 0x94, 0xec, 0xb8, 0x78, 0x90, 0xa5, 0x84, 0xa4, 0xb8, 0x38, 0xd2, 0x32, 0x73, 0x52, 0xf3, + 0x12, 0x73, 0x53, 0xa1, 0x56, 0xc3, 0xf9, 0x20, 0x27, 0x25, 0xe5, 0xa7, 0x54, 0x42, 0x2d, 0x06, + 0xb3, 0x9d, 0x4c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, + 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, + 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xbd, 0x6d, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x90, 0x9e, 0xed, 0x3d, 0x01, 0x00, 0x00, +} + +func (this *AlertConfigDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AlertConfigDesc) + if !ok { + that2, ok := that.(AlertConfigDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.User != that1.User { + return false + } + if this.RawConfig != that1.RawConfig { + return false + } + if len(this.Templates) != len(that1.Templates) { + return false + } + for i := range this.Templates { + if !this.Templates[i].Equal(that1.Templates[i]) { + return false + } + } + return true +} +func (this *TemplateDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TemplateDesc) + if !ok { + 
that2, ok := that.(TemplateDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Filename != that1.Filename { + return false + } + if this.Body != that1.Body { + return false + } + return true +} +func (this *AlertConfigDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&alerts.AlertConfigDesc{") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "RawConfig: "+fmt.Sprintf("%#v", this.RawConfig)+",\n") + if this.Templates != nil { + s = append(s, "Templates: "+fmt.Sprintf("%#v", this.Templates)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TemplateDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&alerts.TemplateDesc{") + s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") + s = append(s, "Body: "+fmt.Sprintf("%#v", this.Body)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAlerts(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *AlertConfigDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlertConfigDesc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAlerts(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.RawConfig) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAlerts(dAtA, i, uint64(len(m.RawConfig))) + i += copy(dAtA[i:], m.RawConfig) + } + if len(m.Templates) > 0 { + for _, msg := range 
m.Templates { + dAtA[i] = 0x1a + i++ + i = encodeVarintAlerts(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TemplateDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateDesc) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Filename) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAlerts(dAtA, i, uint64(len(m.Filename))) + i += copy(dAtA[i:], m.Filename) + } + if len(m.Body) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAlerts(dAtA, i, uint64(len(m.Body))) + i += copy(dAtA[i:], m.Body) + } + return i, nil +} + +func encodeVarintAlerts(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AlertConfigDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + l = len(m.RawConfig) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + if len(m.Templates) > 0 { + for _, e := range m.Templates { + l = e.Size() + n += 1 + l + sovAlerts(uint64(l)) + } + } + return n +} + +func (m *TemplateDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filename) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + l = len(m.Body) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + return n +} + +func sovAlerts(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAlerts(x uint64) (n int) { + return sovAlerts(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AlertConfigDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AlertConfigDesc{`, + `User:` + 
fmt.Sprintf("%v", this.User) + `,`, + `RawConfig:` + fmt.Sprintf("%v", this.RawConfig) + `,`, + `Templates:` + strings.Replace(fmt.Sprintf("%v", this.Templates), "TemplateDesc", "TemplateDesc", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateDesc{`, + `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, + `Body:` + fmt.Sprintf("%v", this.Body) + `,`, + `}`, + }, "") + return s +} +func valueToStringAlerts(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AlertConfigDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlertConfigDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlertConfigDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User 
= string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Templates = append(m.Templates, &TemplateDesc{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAlerts(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Body = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAlerts(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAlerts(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAlerts + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthAlerts + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAlerts(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthAlerts + } + } + return iNdEx, nil + case 4: + return 
iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAlerts = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAlerts = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/alertmanager/alerts/alerts.proto b/pkg/alertmanager/alerts/alerts.proto new file mode 100644 index 00000000000..8626ec981c5 --- /dev/null +++ b/pkg/alertmanager/alerts/alerts.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package alerts; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message AlertConfigDesc { + string user = 1; + string raw_config = 2; + + repeated TemplateDesc templates = 3; +} + +message TemplateDesc { + string filename = 1; + string body = 2; +} \ No newline at end of file diff --git a/pkg/alertmanager/alerts/configdb/store.go b/pkg/alertmanager/alerts/configdb/store.go new file mode 100644 index 00000000000..786ca29a461 --- /dev/null +++ b/pkg/alertmanager/alerts/configdb/store.go @@ -0,0 +1,60 @@ +package configdb + +import ( + "context" + + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/configs" + "github.com/cortexproject/cortex/pkg/configs/client" +) + +// Store is a concrete implementation of RuleStore that sources rules from the config service +type Store struct { + configClient client.Client + since configs.ID + alertConfigs map[string]alerts.AlertConfigDesc +} + +// NewStore constructs a Store +func NewStore(c client.Client) *Store { + return &Store{ + configClient: c, + since: 0, + alertConfigs: make(map[string]alerts.AlertConfigDesc), + } +} + +// ListAlertConfigs implements RuleStore +func (c *Store) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) { + + configs, err := c.configClient.GetAlerts(ctx, c.since) + + if 
err != nil { + return nil, err + } + + for user, cfg := range configs.Configs { + if cfg.IsDeleted() { + delete(c.alertConfigs, user) + continue + } + + var templates []*alerts.TemplateDesc + for fn, template := range cfg.Config.TemplateFiles { + templates = append(templates, &alerts.TemplateDesc{ + Filename: fn, + Body: template, + }) + } + + c.alertConfigs[user] = alerts.AlertConfigDesc{ + User: user, + RawConfig: cfg.Config.AlertmanagerConfig, + Templates: templates, + } + } + + c.since = configs.GetLatestConfigID() + + return c.alertConfigs, nil +} diff --git a/pkg/alertmanager/alerts/local/store.go b/pkg/alertmanager/alerts/local/store.go new file mode 100644 index 00000000000..6f04fd4b846 --- /dev/null +++ b/pkg/alertmanager/alerts/local/store.go @@ -0,0 +1,75 @@ +package local + +import ( + "context" + "flag" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/prometheus/alertmanager/config" + + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" +) + +// StoreConfig configures a static file alertmanager store +type StoreConfig struct { + Path string `yaml:"path"` +} + +// RegisterFlags registers flags related to the alertmanager file store +func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.Path, "alertmanager.storage.local.path", "", "Path at which alertmanager configurations are stored.") +} + +// Store is used to load user alertmanager configs from a local disk +type Store struct { + cfg StoreConfig +} + +// NewStore returns a new file alert store. +func NewStore(cfg StoreConfig) (*Store, error) { + return &Store{cfg}, nil +} + +// ListAlertConfigs returns a list of each users alertmanager config. 
+func (f *Store) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) { + configs := map[string]alerts.AlertConfigDesc{} + err := filepath.Walk(f.cfg.Path, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Ignore files that are directories or not yaml files + ext := filepath.Ext(info.Name()) + if info.IsDir() || (ext != ".yml" && ext != ".yaml") { + return nil + } + + _, err = config.LoadFile(f.cfg.Path + info.Name()) + if err != nil { + return err + } + + content, err := ioutil.ReadFile(f.cfg.Path + info.Name()) + if err != nil { + return err + } + + // The file name must correspond to the user tenant ID + user := strings.TrimSuffix(info.Name(), ext) + + configs[user] = alerts.AlertConfigDesc{ + User: user, + RawConfig: string(content), + } + return nil + }) + + if err != nil { + return nil, err + } + + return configs, nil +} diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 69df130f2e9..7373b7383e5 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -21,8 +21,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/configs" - configs_client "github.com/cortexproject/cortex/pkg/configs/client" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -67,11 +66,11 @@ const ( ) var ( - totalConfigs = prometheus.NewGauge(prometheus.GaugeOpts{ + totalConfigs = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cortex", Name: "alertmanager_configs", Help: "How many configs the multitenant alertmanager knows about.", - }) + }, []string{"status"}) statusTemplate *template.Template ) @@ -101,6 +100,8 @@ type MultitenantAlertmanagerConfig struct { FallbackConfigFile string AutoWebhookRoot string + + Store AlertStoreConfig } const 
defaultClusterAddr = "0.0.0.0:9094" @@ -120,6 +121,8 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.ClusterAdvertiseAddr, "cluster.advertise-address", "", "Explicit address to advertise in cluster.") f.Var(&cfg.Peers, "cluster.peer", "Initial peers (may be repeated).") f.DurationVar(&cfg.PeerTimeout, "cluster.peer-timeout", time.Second*15, "Time to wait between peers to send notifications.") + + cfg.Store.RegisterFlags(f) } // A MultitenantAlertmanager manages Alertmanager instances for multiple @@ -127,7 +130,7 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) { type MultitenantAlertmanager struct { cfg *MultitenantAlertmanagerConfig - configsAPI configs_client.Client + store AlertStore // The fallback config is stored as a string and parsed every time it's needed // because we mutate the parsed results and don't want those changes to take @@ -135,13 +138,12 @@ type MultitenantAlertmanager struct { fallbackConfig string // All the organization configurations that we have. Only used for instrumentation. - cfgs map[string]configs.Config + cfgs map[string]alerts.AlertConfigDesc alertmanagersMtx sync.Mutex alertmanagers map[string]*Alertmanager - latestConfig configs.ID - latestMutex sync.RWMutex + logger log.Logger peer *cluster.Peer @@ -150,17 +152,12 @@ type MultitenantAlertmanager struct { } // NewMultitenantAlertmanager creates a new MultitenantAlertmanager. 
-func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, cfgCfg configs_client.Config) (*MultitenantAlertmanager, error) { +func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, logger log.Logger) (*MultitenantAlertmanager, error) { err := os.MkdirAll(cfg.DataDir, 0777) if err != nil { return nil, fmt.Errorf("unable to create Alertmanager data directory %q: %s", cfg.DataDir, err) } - configsAPI, err := configs_client.New(cfgCfg) - if err != nil { - return nil, err - } - var fallbackConfig []byte if cfg.FallbackConfigFile != "" { fallbackConfig, err = ioutil.ReadFile(cfg.FallbackConfigFile) @@ -176,7 +173,7 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, cfgCfg confi var peer *cluster.Peer if cfg.ClusterBindAddr != "" { peer, err = cluster.Create( - log.With(util.Logger, "component", "cluster"), + log.With(logger, "component", "cluster"), prometheus.DefaultRegisterer, cfg.ClusterBindAddr, cfg.ClusterAdvertiseAddr, @@ -193,18 +190,24 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, cfgCfg confi } err = peer.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout) if err != nil { - level.Warn(util.Logger).Log("msg", "unable to join gossip mesh", "err", err) + level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err) } go peer.Settle(context.Background(), cluster.DefaultGossipInterval) } + store, err := NewAlertStore(cfg.Store) + if err != nil { + return nil, err + } + am := &MultitenantAlertmanager{ cfg: cfg, - configsAPI: configsAPI, fallbackConfig: string(fallbackConfig), - cfgs: map[string]configs.Config{}, + cfgs: map[string]alerts.AlertConfigDesc{}, alertmanagers: map[string]*Alertmanager{}, peer: peer, + store: store, + logger: log.With(logger, "component", "MultiTenantAlertmanager"), stop: make(chan struct{}), done: make(chan struct{}), } @@ -216,14 +219,14 @@ func (am *MultitenantAlertmanager) Run() { defer close(am.done) // Load initial set of all 
configurations before polling for new ones. - am.addNewConfigs(am.loadAllConfigs()) + am.syncConfigs(am.loadAllConfigs()) ticker := time.NewTicker(am.cfg.PollInterval) for { select { - case now := <-ticker.C: - err := am.updateConfigs(now) + case <-ticker.C: + err := am.updateConfigs() if err != nil { - level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: error updating configs", "err", err) + level.Warn(am.logger).Log("msg", "error updating configs", "err", err) } case <-am.stop: ticker.Stop() @@ -243,65 +246,70 @@ func (am *MultitenantAlertmanager) Stop() { am.alertmanagersMtx.Unlock() err := am.peer.Leave(am.cfg.PeerTimeout) if err != nil { - level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: failed to leave the cluster", "err", err) + level.Warn(am.logger).Log("msg", "failed to leave the cluster", "err", err) } - level.Debug(util.Logger).Log("msg", "MultitenantAlertmanager stopped") + level.Debug(am.logger).Log("msg", "stopping") } -// Load the full set of configurations from the server, retrying with backoff +// Load the full set of configurations from the alert store, retrying with backoff // until we can get them. 
-func (am *MultitenantAlertmanager) loadAllConfigs() map[string]configs.View { +func (am *MultitenantAlertmanager) loadAllConfigs() map[string]alerts.AlertConfigDesc { backoff := util.NewBackoff(context.Background(), backoffConfig) for { cfgs, err := am.poll() if err == nil { - level.Debug(util.Logger).Log("msg", "MultitenantAlertmanager: initial configuration load", "num_configs", len(cfgs)) + level.Debug(am.logger).Log("msg", "initial configuration load", "num_configs", len(cfgs)) return cfgs } - level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: error fetching all configurations, backing off", "err", err) + level.Warn(am.logger).Log("msg", "error fetching all configurations, backing off", "err", err) backoff.Wait() } } -func (am *MultitenantAlertmanager) updateConfigs(now time.Time) error { +func (am *MultitenantAlertmanager) updateConfigs() error { cfgs, err := am.poll() if err != nil { return err } - am.addNewConfigs(cfgs) + am.syncConfigs(cfgs) return nil } -// poll the configuration server. Not re-entrant. -func (am *MultitenantAlertmanager) poll() (map[string]configs.View, error) { - configID := am.latestConfig - cfgs, err := am.configsAPI.GetAlerts(context.Background(), configID) +// poll the alert store. Not re-entrant. +func (am *MultitenantAlertmanager) poll() (map[string]alerts.AlertConfigDesc, error) { + cfgs, err := am.store.ListAlertConfigs(context.Background()) if err != nil { - level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: configs server poll failed", "err", err) + level.Warn(am.logger).Log("msg", "configs alert store poll failed", "err", err) return nil, err } - am.latestMutex.Lock() - am.latestConfig = cfgs.GetLatestConfigID() - am.latestMutex.Unlock() - return cfgs.Configs, nil + return cfgs, nil } -func (am *MultitenantAlertmanager) addNewConfigs(cfgs map[string]configs.View) { - // TODO: instrument how many configs we have, both valid & invalid. 
- level.Debug(util.Logger).Log("msg", "adding configurations", "num_configs", len(cfgs)) - for userID, config := range cfgs { - if config.IsDeleted() { - am.deleteUser(userID) - continue - } - err := am.setConfig(userID, config.Config) +func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfigDesc) { + invalid := 0 // Count the number of invalid configs as we go. + + level.Debug(am.logger).Log("msg", "adding configurations", "num_configs", len(cfgs)) + for _, cfg := range cfgs { + err := am.setConfig(cfg) if err != nil { - level.Warn(util.Logger).Log("msg", "MultitenantAlertmanager: error applying config", "err", err) + invalid++ + level.Warn(am.logger).Log("msg", "error applying config", "err", err) continue } + } + am.alertmanagersMtx.Lock() + defer am.alertmanagersMtx.Unlock() + for user, userAM := range am.alertmanagers { + if _, exists := cfgs[user]; !exists { + userAM.Stop() + delete(am.alertmanagers, user) + delete(am.cfgs, user) + level.Info(am.logger).Log("msg", "deleting alertmanager", "user", user) + } } - totalConfigs.Set(float64(len(am.cfgs))) + totalConfigs.WithLabelValues("invalid").Set(float64(invalid)) + totalConfigs.WithLabelValues("valid").Set(float64(len(am.alertmanagers) - invalid)) } func (am *MultitenantAlertmanager) transformConfig(userID string, amConfig *amconfig.Config) (*amconfig.Config, error) { @@ -347,16 +355,16 @@ func (am *MultitenantAlertmanager) createTemplatesFile(userID, fn, content strin // setConfig applies the given configuration to the alertmanager for `userID`, // creating an alertmanager if it doesn't already exist. 
-func (am *MultitenantAlertmanager) setConfig(userID string, config configs.Config) error { +func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { am.alertmanagersMtx.Lock() - existing, hasExisting := am.alertmanagers[userID] + existing, hasExisting := am.alertmanagers[cfg.User] am.alertmanagersMtx.Unlock() - var amConfig *amconfig.Config + var userAmConfig *amconfig.Config var err error var hasTemplateChanges bool - for fn, content := range config.TemplateFiles { - hasChanged, err := am.createTemplatesFile(userID, fn, content) + for _, tmpl := range cfg.Templates { + hasChanged, err := am.createTemplatesFile(cfg.User, tmpl.Filename, tmpl.Body) if err != nil { return err } @@ -366,70 +374,55 @@ func (am *MultitenantAlertmanager) setConfig(userID string, config configs.Confi } } - if config.AlertmanagerConfig == "" { + level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User) + + if cfg.RawConfig == "" { if am.fallbackConfig == "" { - return fmt.Errorf("blank Alertmanager configuration for %v", userID) + return fmt.Errorf("blank Alertmanager configuration for %v", cfg.User) } - level.Info(util.Logger).Log("msg", "blank Alertmanager configuration; using fallback", "user_id", userID) - amConfig, err = amconfig.Load(am.fallbackConfig) + level.Info(am.logger).Log("msg", "blank Alertmanager configuration; using fallback", "user_id", cfg.User) + userAmConfig, err = amconfig.Load(am.fallbackConfig) if err != nil { - return fmt.Errorf("unable to load fallback configuration for %v: %v", userID, err) + return fmt.Errorf("unable to load fallback configuration for %v: %v", cfg.User, err) } } else { - amConfig, err = alertmanagerConfigFromConfig(config) + userAmConfig, err = amconfig.Load(cfg.RawConfig) if err != nil && hasExisting { // XXX: This means that if a user has a working configuration and // they submit a broken one, we'll keep processing the last known // working configuration, and they'll never know. 
// TODO: Provide a way of communicating this to the user and for removing // Alertmanager instances. - return fmt.Errorf("invalid Cortex configuration for %v: %v", userID, err) + return fmt.Errorf("invalid Cortex configuration for %v: %v", cfg.User, err) } } - if amConfig, err = am.transformConfig(userID, amConfig); err != nil { + if userAmConfig, err = am.transformConfig(cfg.User, userAmConfig); err != nil { return err } // If no Alertmanager instance exists for this user yet, start one. if !hasExisting { - newAM, err := am.newAlertmanager(userID, amConfig) + level.Debug(am.logger).Log("msg", "initializing new alertmanager tenant", "user", cfg.User) + newAM, err := am.newAlertmanager(cfg.User, userAmConfig) if err != nil { return err } am.alertmanagersMtx.Lock() - am.alertmanagers[userID] = newAM + am.alertmanagers[cfg.User] = newAM am.alertmanagersMtx.Unlock() - } else if am.cfgs[userID].AlertmanagerConfig != config.AlertmanagerConfig || hasTemplateChanges { + } else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges { + level.Info(am.logger).Log("msg", "updating new alertmanager tenant", "user", cfg.User) // If the config changed, apply the new one. - err := existing.ApplyConfig(userID, amConfig) + err := existing.ApplyConfig(cfg.User, userAmConfig) if err != nil { - return fmt.Errorf("unable to apply Alertmanager config for user %v: %v", userID, err) + return fmt.Errorf("unable to apply Alertmanager config for user %v: %v", cfg.User, err) } } - am.cfgs[userID] = config + am.cfgs[cfg.User] = cfg return nil } -// alertmanagerConfigFromConfig returns the Alertmanager config from the Cortex configuration. 
-func alertmanagerConfigFromConfig(c configs.Config) (*amconfig.Config, error) { - cfg, err := amconfig.Load(c.AlertmanagerConfig) - if err != nil { - return nil, fmt.Errorf("error parsing Alertmanager config: %s", err) - } - return cfg, nil -} - -func (am *MultitenantAlertmanager) deleteUser(userID string) { - am.alertmanagersMtx.Lock() - if existing, hasExisting := am.alertmanagers[userID]; hasExisting { - existing.Stop() - } - delete(am.alertmanagers, userID) - delete(am.cfgs, userID) - am.alertmanagersMtx.Unlock() -} - func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amconfig.Config) (*Alertmanager, error) { newAM, err := New(&Config{ UserID: userID, diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go new file mode 100644 index 00000000000..e646dadff26 --- /dev/null +++ b/pkg/alertmanager/multitenant_test.go @@ -0,0 +1,117 @@ +package alertmanager + +import ( + "context" + "io/ioutil" + "os" + "sync" + "testing" + + "github.com/go-kit/kit/log" + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +var ( + simpleConfigOne = `route: + receiver: dummy + +receivers: + - name: dummy` + + simpleConfigTwo = `route: + receiver: dummy + +receivers: + - name: dummy` +) + +// basic easily configurable mock +type mockAlertStore struct { + configs map[string]alerts.AlertConfigDesc +} + +func (m *mockAlertStore) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) { + return m.configs, nil +} + +func TestMultitenantAlertmanager_loadAllConfigs(t *testing.T) { + mockStore := &mockAlertStore{ + configs: map[string]alerts.AlertConfigDesc{ + "user1": { + User: "user1", + RawConfig: simpleConfigOne, + Templates: []*alerts.TemplateDesc{}, + }, + "user2": { + User: "user2", + RawConfig: simpleConfigOne, + Templates: []*alerts.TemplateDesc{}, + }, + }, + } + + externalURL := 
flagext.URLValue{} + err := externalURL.Set("http://localhost/api/prom") + require.NoError(t, err) + + tempDir, err := ioutil.TempDir(os.TempDir(), "alertmanager") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + am := &MultitenantAlertmanager{ + cfg: &MultitenantAlertmanagerConfig{ + ExternalURL: externalURL, + DataDir: tempDir, + }, + store: mockStore, + cfgs: map[string]alerts.AlertConfigDesc{}, + alertmanagersMtx: sync.Mutex{}, + alertmanagers: map[string]*Alertmanager{}, + logger: log.NewNopLogger(), + stop: make(chan struct{}), + done: make(chan struct{}), + } + + // Ensure the configs are synced correctly + require.NoError(t, am.updateConfigs()) + + require.Len(t, am.alertmanagers, 2) + + currentConfig, exists := am.cfgs["user1"] + require.True(t, exists) + require.Equal(t, simpleConfigOne, currentConfig.RawConfig) + + // Ensure when a 3rd config is added, it is synced correctly + mockStore.configs["user3"] = alerts.AlertConfigDesc{ + User: "user3", + RawConfig: simpleConfigOne, + Templates: []*alerts.TemplateDesc{}, + } + + require.NoError(t, am.updateConfigs()) + + require.Len(t, am.alertmanagers, 3) + + // Ensure the config is updated + mockStore.configs["user1"] = alerts.AlertConfigDesc{ + User: "user1", + RawConfig: simpleConfigTwo, + Templates: []*alerts.TemplateDesc{}, + } + + require.NoError(t, am.updateConfigs()) + + currentConfig, exists = am.cfgs["user1"] + require.True(t, exists) + require.Equal(t, simpleConfigTwo, currentConfig.RawConfig) + + // Test Delete User + delete(mockStore.configs, "user3") + require.NoError(t, am.updateConfigs()) + currentConfig, exists = am.cfgs["user3"] + require.False(t, exists) + require.Equal(t, alerts.AlertConfigDesc{}, currentConfig) +} diff --git a/pkg/alertmanager/storage.go b/pkg/alertmanager/storage.go new file mode 100644 index 00000000000..7a94b676114 --- /dev/null +++ b/pkg/alertmanager/storage.go @@ -0,0 +1,47 @@ +package alertmanager + +import ( + "context" + "flag" + "fmt" + + 
"github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts/local" + "github.com/cortexproject/cortex/pkg/configs/client" +) + +// AlertStore stores and configures users rule configs +type AlertStore interface { + ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) +} + +// AlertStoreConfig configures the alertmanager backend +type AlertStoreConfig struct { + Type string `yaml:"type"` + ConfigDB client.Config `yaml:"configdb"` + Local local.StoreConfig `yaml:"local"` +} + +// RegisterFlags registers flags. +func (cfg *AlertStoreConfig) RegisterFlags(f *flag.FlagSet) { + cfg.Local.RegisterFlags(f) + cfg.ConfigDB.RegisterFlagsWithPrefix("alertmanager.", f) + f.StringVar(&cfg.Type, "alertmanager.storage.type", "configdb", "Type of backend to use to store alertmanager configs. Supported values are: \"configdb\", \"local\".") +} + +// NewAlertStore returns a new rule storage backend poller and store +func NewAlertStore(cfg AlertStoreConfig) (AlertStore, error) { + switch cfg.Type { + case "configdb": + c, err := client.New(cfg.ConfigDB) + if err != nil { + return nil, err + } + return configdb.NewStore(c), nil + case "local": + return local.NewStore(cfg.Local) + default: + return nil, fmt.Errorf("unrecognized alertmanager storage backend %v, choose one of: \"configdb\", \"local\"", cfg.Type) + } +} diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 78989ff3083..7c932d5c3e6 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -21,7 +21,6 @@ import ( chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/compactor" "github.com/cortexproject/cortex/pkg/configs/api" - config_client "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/distributor" 
"github.com/cortexproject/cortex/pkg/ingester" @@ -82,7 +81,6 @@ type Config struct { Ruler ruler.Config `yaml:"ruler,omitempty"` ConfigDB db.Config `yaml:"configdb,omitempty"` - ConfigStore config_client.Config `yaml:"config_store,omitempty"` Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager,omitempty"` RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config,omitempty"` MemberlistKV memberlist.KVConfig `yaml:"memberlist"` @@ -118,7 +116,6 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Ruler.RegisterFlags(f) c.ConfigDB.RegisterFlags(f) - c.ConfigStore.RegisterFlagsWithPrefix("alertmanager.", f) c.Alertmanager.RegisterFlags(f) c.RuntimeConfig.RegisterFlags(f) c.MemberlistKV.RegisterFlags(f, "") diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 2ef73b6e021..8b8734b1fd6 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -478,7 +478,7 @@ func (t *Cortex) stopConfigs() error { } func (t *Cortex) initAlertmanager(cfg *Config) (err error) { - t.alertmanager, err = alertmanager.NewMultitenantAlertmanager(&cfg.Alertmanager, cfg.ConfigStore) + t.alertmanager, err = alertmanager.NewMultitenantAlertmanager(&cfg.Alertmanager, util.Logger) if err != nil { return err } diff --git a/vendor/github.com/prometheus/alertmanager/client/client.go b/vendor/github.com/prometheus/alertmanager/client/client.go new file mode 100644 index 00000000000..72690033db0 --- /dev/null +++ b/vendor/github.com/prometheus/alertmanager/client/client.go @@ -0,0 +1,349 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/prometheus/client_golang/api" + + "github.com/prometheus/alertmanager/config" + "github.com/prometheus/alertmanager/types" +) + +const ( + apiPrefix = "/api/v1" + + epStatus = apiPrefix + "/status" + epSilence = apiPrefix + "/silence/:id" + epSilences = apiPrefix + "/silences" + epAlerts = apiPrefix + "/alerts" + + statusSuccess = "success" + statusError = "error" +) + +// ServerStatus represents the status of the AlertManager endpoint. +type ServerStatus struct { + ConfigYAML string `json:"configYAML"` + ConfigJSON *config.Config `json:"configJSON"` + VersionInfo map[string]string `json:"versionInfo"` + Uptime time.Time `json:"uptime"` + ClusterStatus *ClusterStatus `json:"clusterStatus"` +} + +// PeerStatus represents the status of a peer in the cluster. +type PeerStatus struct { + Name string `json:"name"` + Address string `json:"address"` +} + +// ClusterStatus represents the status of the cluster. +type ClusterStatus struct { + Name string `json:"name"` + Status string `json:"status"` + Peers []PeerStatus `json:"peers"` +} + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. 
+type apiClient struct { + api.Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data,omitempty"` + ErrorType string `json:"errorType,omitempty"` + Error string `json:"error,omitempty"` +} + +type clientError struct { + code int + msg string +} + +func (e *clientError) Error() string { + return fmt.Sprintf("%s (code: %d)", e.msg, e.code) +} + +func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, api.Warnings, error) { + resp, body, warnings, err := c.Client.Do(ctx, req) + if err != nil { + return resp, body, warnings, err + } + + code := resp.StatusCode + + var result apiResponse + if err = json.Unmarshal(body, &result); err != nil { + // Pass the returned body rather than the JSON error because some API + // endpoints return plain text instead of JSON payload. + return resp, body, warnings, &clientError{ + code: code, + msg: string(body), + } + } + + if (code/100 == 2) && (result.Status != statusSuccess) { + return resp, body, warnings, &clientError{ + code: code, + msg: "inconsistent body for response code", + } + } + + if result.Status == statusError { + err = &clientError{ + code: code, + msg: result.Error, + } + } + + return resp, []byte(result.Data), warnings, err +} + +// StatusAPI provides bindings for the Alertmanager's status API. +type StatusAPI interface { + // Get returns the server's configuration, version, uptime and cluster information. + Get(ctx context.Context) (*ServerStatus, error) +} + +// NewStatusAPI returns a status API client. 
+func NewStatusAPI(c api.Client) StatusAPI { + return &httpStatusAPI{client: apiClient{c}} +} + +type httpStatusAPI struct { + client api.Client +} + +func (h *httpStatusAPI) Get(ctx context.Context) (*ServerStatus, error) { + u := h.client.URL(epStatus, nil) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + + _, body, _, err := h.client.Do(ctx, req) // Ignoring warnings. + if err != nil { + return nil, err + } + + var ss *ServerStatus + err = json.Unmarshal(body, &ss) + + return ss, err +} + +// AlertAPI provides bindings for the Alertmanager's alert API. +type AlertAPI interface { + // List returns all the active alerts. + List(ctx context.Context, filter, receiver string, silenced, inhibited, active, unprocessed bool) ([]*ExtendedAlert, error) + // Push sends a list of alerts to the Alertmanager. + Push(ctx context.Context, alerts ...Alert) error +} + +// Alert represents an alert as expected by the AlertManager's push alert API. +type Alert struct { + Labels LabelSet `json:"labels"` + Annotations LabelSet `json:"annotations"` + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// ExtendedAlert represents an alert as returned by the AlertManager's list alert API. +type ExtendedAlert struct { + Alert + Status types.AlertStatus `json:"status"` + Receivers []string `json:"receivers"` + Fingerprint string `json:"fingerprint"` +} + +// LabelSet represents a collection of label names and values as a map. +type LabelSet map[LabelName]LabelValue + +// LabelName represents the name of a label. +type LabelName string + +// LabelValue represents the value of a label. +type LabelValue string + +// NewAlertAPI returns a new AlertAPI for the client. 
+func NewAlertAPI(c api.Client) AlertAPI { + return &httpAlertAPI{client: apiClient{c}} +} + +type httpAlertAPI struct { + client api.Client +} + +func (h *httpAlertAPI) List(ctx context.Context, filter, receiver string, silenced, inhibited, active, unprocessed bool) ([]*ExtendedAlert, error) { + u := h.client.URL(epAlerts, nil) + params := url.Values{} + if filter != "" { + params.Add("filter", filter) + } + params.Add("silenced", fmt.Sprintf("%t", silenced)) + params.Add("inhibited", fmt.Sprintf("%t", inhibited)) + params.Add("active", fmt.Sprintf("%t", active)) + params.Add("unprocessed", fmt.Sprintf("%t", unprocessed)) + params.Add("receiver", receiver) + u.RawQuery = params.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + + _, body, _, err := h.client.Do(ctx, req) // ignoring warnings. + if err != nil { + return nil, err + } + + var alts []*ExtendedAlert + err = json.Unmarshal(body, &alts) + + return alts, err +} + +func (h *httpAlertAPI) Push(ctx context.Context, alerts ...Alert) error { + u := h.client.URL(epAlerts, nil) + + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(&alerts); err != nil { + return err + } + + req, err := http.NewRequest(http.MethodPost, u.String(), &buf) + if err != nil { + return fmt.Errorf("error creating request: %v", err) + } + + _, _, _, err = h.client.Do(ctx, req) // Ignoring warnings. + return err +} + +// SilenceAPI provides bindings for the Alertmanager's silence API. +type SilenceAPI interface { + // Get returns the silence associated with the given ID. + Get(ctx context.Context, id string) (*types.Silence, error) + // Set updates or creates the given silence and returns its ID. + Set(ctx context.Context, sil types.Silence) (string, error) + // Expire expires the silence with the given ID. + Expire(ctx context.Context, id string) error + // List returns silences matching the given filter. 
+ List(ctx context.Context, filter string) ([]*types.Silence, error) +} + +// NewSilenceAPI returns a new SilenceAPI for the client. +func NewSilenceAPI(c api.Client) SilenceAPI { + return &httpSilenceAPI{client: apiClient{c}} +} + +type httpSilenceAPI struct { + client api.Client +} + +func (h *httpSilenceAPI) Get(ctx context.Context, id string) (*types.Silence, error) { + u := h.client.URL(epSilence, map[string]string{ + "id": id, + }) + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + + _, body, _, err := h.client.Do(ctx, req) // Ignoring warnings. + if err != nil { + return nil, err + } + + var sil types.Silence + err = json.Unmarshal(body, &sil) + + return &sil, err +} + +func (h *httpSilenceAPI) Expire(ctx context.Context, id string) error { + u := h.client.URL(epSilence, map[string]string{ + "id": id, + }) + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return fmt.Errorf("error creating request: %v", err) + } + + _, _, _, err = h.client.Do(ctx, req) // Ignoring warnings. + return err +} + +func (h *httpSilenceAPI) Set(ctx context.Context, sil types.Silence) (string, error) { + u := h.client.URL(epSilences, nil) + + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(&sil); err != nil { + return "", err + } + + req, err := http.NewRequest(http.MethodPost, u.String(), &buf) + if err != nil { + return "", fmt.Errorf("error creating request: %v", err) + } + + _, body, _, err := h.client.Do(ctx, req) // Ignoring warnings. 
+ if err != nil { + return "", err + } + + var res struct { + SilenceID string `json:"silenceId"` + } + err = json.Unmarshal(body, &res) + + return res.SilenceID, err +} + +func (h *httpSilenceAPI) List(ctx context.Context, filter string) ([]*types.Silence, error) { + u := h.client.URL(epSilences, nil) + params := url.Values{} + if filter != "" { + params.Add("filter", filter) + } + u.RawQuery = params.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + + _, body, _, err := h.client.Do(ctx, req) // Ignoring warnings. + if err != nil { + return nil, err + } + + var sils []*types.Silence + err = json.Unmarshal(body, &sils) + + return sils, err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index bd44c20b6ef..15d9e3956a0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -384,6 +384,7 @@ github.com/prometheus/alertmanager/api/v2/restapi/operations/general github.com/prometheus/alertmanager/api/v2/restapi/operations/receiver github.com/prometheus/alertmanager/api/v2/restapi/operations/silence github.com/prometheus/alertmanager/asset +github.com/prometheus/alertmanager/client github.com/prometheus/alertmanager/cluster github.com/prometheus/alertmanager/cluster/clusterpb github.com/prometheus/alertmanager/config