diff --git a/.circleci/config.yml b/.circleci/config.yml index 6416a2e74dc..affb6be3f08 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,7 @@ executors: # should also be updated. golang: docker: - - image: circleci/golang:1.11 + - image: circleci/golang:1.12 jobs: test: @@ -15,7 +15,11 @@ jobs: steps: - checkout - run: make promu - - run: make check_license style unused staticcheck build check_assets + - run: + command: make check_license style unused lint build check_assets + environment: + # Run garbage collection more aggressively to avoid getting OOMed during the lint phase. + GOGC: "20" - run: command: | curl -s -L https://github.com/protocolbuffers/protobuf/releases/download/v3.5.1/protoc-3.5.1-linux-x86_64.zip > /tmp/protoc.zip @@ -53,11 +57,11 @@ jobs: steps: - checkout - - setup_remote_docker + - setup_remote_docker: + version: 18.06.0-ce + - run: docker run --privileged linuxkit/binfmt:v0.6 - attach_workspace: at: . - - run: ln -s .build/linux-amd64/prometheus prometheus - - run: ln -s .build/linux-amd64/promtool promtool - run: make docker - run: make docker DOCKER_REPO=quay.io/prometheus - run: docker images @@ -65,16 +69,17 @@ jobs: - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io - run: make docker-publish - run: make docker-publish DOCKER_REPO=quay.io/prometheus + - run: make docker-manifest + - run: make docker-manifest DOCKER_REPO=quay.io/prometheus docker_hub_release_tags: executor: golang steps: - checkout - - setup_remote_docker - - run: mkdir -v -p ${HOME}/bin - - run: curl -L 'https://github.com/aktau/github-release/releases/download/v0.7.2/linux-amd64-github-release.tar.bz2' | tar xvjf - --strip-components 3 -C ${HOME}/bin - - run: echo 'export PATH=${HOME}/bin:${PATH}' >> ${BASH_ENV} + - setup_remote_docker: + version: 18.06.0-ce + - run: docker run --privileged linuxkit/binfmt:v0.6 - attach_workspace: at: . 
- run: make promu @@ -84,19 +89,29 @@ jobs: - store_artifacts: path: .tarballs destination: releases - - run: ln -s .build/linux-amd64/prometheus prometheus - - run: ln -s .build/linux-amd64/promtool promtool - run: make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG - run: make docker DOCKER_IMAGE_TAG=$CIRCLE_TAG DOCKER_REPO=quay.io/prometheus - run: docker login -u $DOCKER_LOGIN -p $DOCKER_PASSWORD - run: docker login -u $QUAY_LOGIN -p $QUAY_PASSWORD quay.io + - run: make docker-publish DOCKER_IMAGE_TAG="$CIRCLE_TAG" + - run: make docker-publish DOCKER_IMAGE_TAG="$CIRCLE_TAG" DOCKER_REPO=quay.io/prometheus + - run: make docker-manifest DOCKER_IMAGE_TAG="$CIRCLE_TAG" + - run: make docker-manifest DOCKER_IMAGE_TAG="$CIRCLE_TAG" DOCKER_REPO=quay.io/prometheus - run: | if [[ "$CIRCLE_TAG" =~ ^v[0-9]+(\.[0-9]+){2}$ ]]; then make docker-tag-latest DOCKER_IMAGE_TAG="$CIRCLE_TAG" make docker-tag-latest DOCKER_IMAGE_TAG="$CIRCLE_TAG" DOCKER_REPO=quay.io/prometheus + make docker-publish DOCKER_IMAGE_TAG="latest" + make docker-publish DOCKER_IMAGE_TAG="latest" DOCKER_REPO=quay.io/prometheus + make docker-manifest DOCKER_IMAGE_TAG="latest" + make docker-manifest DOCKER_IMAGE_TAG="latest" DOCKER_REPO=quay.io/prometheus fi - - run: make docker-publish - - run: make docker-publish DOCKER_REPO=quay.io/prometheus + + makefile_sync: + executor: golang + steps: + - checkout + - run: ./scripts/sync_makefiles.sh workflows: version: 2 @@ -126,3 +141,14 @@ workflows: only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/ branches: ignore: /.*/ + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - master + jobs: + - makefile_sync + diff --git a/.dockerignore b/.dockerignore index a4d092b226f..07a4d4f571c 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,3 +3,5 @@ data/ .tarballs/ !.build/linux-amd64/ +!.build/linux-armv7/ +!.build/linux-arm64/ diff --git a/.github/lock.yml b/.github/lock.yml new file mode 100644 index 00000000000..bed690b33b5 --- /dev/null +++ 
b/.github/lock.yml @@ -0,0 +1,35 @@ +# Configuration for Lock Threads - https://github.com/dessant/lock-threads + +# Number of days of inactivity before a closed issue or pull request is locked +daysUntilLock: 180 + +# Skip issues and pull requests created before a given timestamp. Timestamp must +# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable +skipCreatedBefore: false + +# Issues and pull requests with these labels will be ignored. Set to `[]` to disable +exemptLabels: [] + +# Label to add before locking, such as `outdated`. Set to `false` to disable +lockLabel: false + +# Comment to post before locking. Set to `false` to disable +lockComment: false + +# Assign `resolved` as the reason for locking. Set to `false` to disable +setLockReason: false + +# Limit to only `issues` or `pulls` +only: issues + +# Optionally, specify configuration settings just for `issues` or `pulls` +# issues: +# exemptLabels: +# - help-wanted +# lockLabel: outdated + +# pulls: +# daysUntilLock: 30 + +# Repository to extend settings from +# _extends: repo diff --git a/.gitignore b/.gitignore index 05ffbb95f2f..de38f8dc750 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,7 @@ *# .#* -*-stamp /*.yaml /*.yml -/*.rules *.exe /prometheus @@ -12,12 +10,9 @@ benchmark.txt /data /cmd/prometheus/data /cmd/prometheus/debug -/.build -/.release -/.tarballs -!/circle.yml !/.travis.yml !/.promu.yml +!/.golangci.yml /documentation/examples/remote_storage/remote_storage_adapter/remote_storage_adapter /documentation/examples/remote_storage/example_write_adapter/example_writer_adapter diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000000..1a05236e27d --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,13 @@ +run: + modules-download-mode: vendor + deadline: 5m + +issues: + exclude-rules: + - path: _test.go + linters: + - errcheck + +linters-settings: + errcheck: + exclude: scripts/errcheck_excludes.txt diff --git a/.promu.yml b/.promu.yml index c7c31f46daa..5af11f8c85e 
100644 --- a/.promu.yml +++ b/.promu.yml @@ -1,7 +1,7 @@ go: # Whenever the Go version is updated here, .travis.yml and # .circle/config.yml should also be updated. - version: 1.11 + version: 1.12 repository: path: github.com/prometheus/prometheus build: diff --git a/.travis.yml b/.travis.yml index bb704e9cd49..f1e2287aaef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,14 +1,18 @@ -sudo: false - language: go # Whenever the Go version is updated here, .circleci/config.yml and .promu.yml # should also be updated. go: -- 1.11.x +- 1.12.x go_import_path: github.com/prometheus/prometheus +# This ensures that the local cache is filled before running the CI. +# travis_retry retries the command 3 times if it fails as we've experienced +# random issues on Travis. +before_install: +- travis_retry make deps + script: -- make check_license style unused test staticcheck check_assets +- make check_license style unused test lint check_assets - git diff --exit-code diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bbecba6979..88b8144c92f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,100 @@ +## 2.10.0 / 2019-05-25 + +* [CHANGE/BUGFIX] API: Encode alert values as string to correctly represent Inf/NaN. #5582 +* [FEATURE] Template expansion: Make external labels available as `$externalLabels` in alert and console template expansion. #5463 +* [FEATURE] TSDB: Add `prometheus_tsdb_wal_segment_current` metric for the WAL segment index that TSDB is currently writing to. tsdb#601 +* [FEATURE] Scrape: Add `scrape_series_added` per-scrape metric. #5546 +* [ENHANCEMENT] Discovery/kubernetes: Add labels `__meta_kubernetes_endpoint_node_name` and `__meta_kubernetes_endpoint_hostname`. #5571 +* [ENHANCEMENT] Discovery/azure: Add label `__meta_azure_machine_public_ip`. #5475 +* [ENHANCEMENT] TSDB: Simplify mergedPostings.Seek, resulting in better performance if there are many posting lists. tsdb#595 +* [ENHANCEMENT] Log filesystem type on startup. 
#5558 +* [ENHANCEMENT] Cmd/promtool: Use POST requests for Query and QueryRange. client_golang#557 +* [ENHANCEMENT] Web: Sort alerts by group name. #5448 +* [ENHANCEMENT] Console templates: Add convenience variables `$rawParams`, `$params`, `$path`. #5463 +* [BUGFIX] TSDB: Don't panic when running out of disk space and recover nicely from the condition. tsdb#582 +* [BUGFIX] TSDB: Correctly handle empty labels. tsdb#594 +* [BUGFIX] TSDB: Don't crash on an unknown tombstone reference. tsdb#604 +* [BUGFIX] Storage/remote: Remove queue-manager specific metrics if queue no longer exists. #5445 #5485 #5555 +* [BUGFIX] PromQL: Correctly display `{__name__="a"}`. #5552 +* [BUGFIX] Discovery/kubernetes: Use `service` rather than `ingress` as the name for the service workqueue. #5520 +* [BUGFIX] Discovery/azure: Don't panic on a VM with a public IP. #5587 +* [BUGFIX] Discovery/triton: Always read HTTP body to completion. #5596 +* [BUGFIX] Web: Fixed Content-Type for js and css instead of using `/etc/mime.types`. #5551 + +## 2.9.2 / 2019-04-24 + +* [BUGFIX] Make sure subquery range is taken into account for selection #5467 +* [BUGFIX] Exhaust every request body before closing it #5166 +* [BUGFIX] Cmd/promtool: return errors from rule evaluations #5483 +* [BUGFIX] Remote Storage: string interner should not panic in release #5487 +* [BUGFIX] Fix memory allocation regression in mergedPostings.Seek tsdb#586 + +## 2.9.1 / 2019-04-16 + +* [BUGFIX] Discovery/kubernetes: fix missing label sanitization #5462 +* [BUGFIX] Remote_write: Prevent reshard concurrent with calling stop #5460 + +## 2.9.0 / 2019-04-15 + +This release uses Go 1.12, which includes a change in how memory is released +to Linux. This will cause RSS to be reported as higher, however this is harmless +and the memory is available to the kernel when it needs it. + +* [CHANGE/ENHANCEMENT] Update Consul to support catalog.ServiceMultipleTags. #5151 +* [FEATURE] Add honor_timestamps scrape option. 
#5304 +* [ENHANCEMENT] Discovery/kubernetes: add present labels for labels/annotations. #5443 +* [ENHANCEMENT] OpenStack SD: Add ProjectID and UserID meta labels. #5431 +* [ENHANCEMENT] Add GODEBUG and retention to the runtime page. #5324 #5322 +* [ENHANCEMENT] Add support for POSTing to /series endpoint. #5422 +* [ENHANCEMENT] Support PUT methods for Lifecycle and Admin APIs. #5376 +* [ENHANCEMENT] Scrape: Add global jitter for HA server. #5181 +* [ENHANCEMENT] Check for cancellation on every step of a range evaluation. #5131 +* [ENHANCEMENT] String interning for labels & values in the remote_write path. #5316 +* [ENHANCEMENT] Don't lose the scrape cache on a failed scrape. #5414 +* [ENHANCEMENT] Reload cert files from disk automatically. common#173 +* [ENHANCEMENT] Use fixed length millisecond timestamp format for logs. common#172 +* [ENHANCEMENT] Performance improvements for postings. tsdb#509 tsdb#572 +* [BUGFIX] Remote Write: fix checkpoint reading. #5429 +* [BUGFIX] Check if label value is valid when unmarshaling external labels from YAML. #5316 +* [BUGFIX] Promparse: sort all labels when parsing. #5372 +* [BUGFIX] Reload rules: copy state on both name and labels. #5368 +* [BUGFIX] Exponentation operator to drop metric name in result of operation. #5329 +* [BUGFIX] Config: resolve more file paths. #5284 +* [BUGFIX] Promtool: resolve relative paths in alert test files. #5336 +* [BUGFIX] Set TLSHandshakeTimeout in HTTP transport. common#179 +* [BUGFIX] Use fsync to be more resilient to machine crashes. tsdb#573 tsdb#578 +* [BUGFIX] Keep series that are still in WAL in checkpoints. tsdb#577 +* [BUGFIX] Fix output sample values for scalar-to-vector comparison operations. #5454 + +## 2.8.1 / 2019-03-28 + +* [BUGFIX] Display the job labels in `/targets` which was removed accidentally. #5406 + +## 2.8.0 / 2019-03-12 + +This release uses Write-Ahead Logging (WAL) for the remote_write API. 
This currently causes a slight increase in memory usage, which will be addressed in future releases. + +* [CHANGE] Default time retention is used only when no size based retention is specified. These are flags where time retention is specified by the flag `--storage.tsdb.retention` and size retention by `--storage.tsdb.retention.size`. #5216 +* [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`. prometheus/tsdb#506 +* [FEATURE] [EXPERIMENTAL] Time overlapping blocks are now allowed; vertical compaction and vertical query merge. It is an optional feature which is controlled by the `--storage.tsdb.allow-overlapping-blocks` flag, disabled by default. prometheus/tsdb#370 +* [ENHANCEMENT] Use the WAL for remote_write API. #4588 +* [ENHANCEMENT] Query performance improvements. prometheus/tsdb#531 +* [ENHANCEMENT] UI enhancements with upgrade to Bootstrap 4. #5226 +* [ENHANCEMENT] Reduce time that Alertmanagers are in flux when reloaded. #5126 +* [ENHANCEMENT] Limit number of metrics displayed on UI to 10000. #5139 +* [ENHANCEMENT] (1) Remember All/Unhealthy choice on target-overview when reloading page. (2) Resize text-input area on Graph page on mouseclick. #5201 +* [ENHANCEMENT] In `histogram_quantile` merge buckets with equivalent le values. #5158. +* [ENHANCEMENT] Show list of offending labels in the error message in many-to-many scenarios. #5189 +* [ENHANCEMENT] Show `Storage Retention` criteria in effect on `/status` page. #5322 +* [BUGFIX] Fix sorting of rule groups. #5260 +* [BUGFIX] Fix support for password_file and bearer_token_file in Kubernetes SD. #5211 +* [BUGFIX] Scrape: catch errors when creating HTTP clients #5182. Adds new metrics: + * `prometheus_target_scrape_pools_total` + * `prometheus_target_scrape_pools_failed_total` + * `prometheus_target_scrape_pool_reloads_total` + * `prometheus_target_scrape_pool_reloads_failed_total` +* [BUGFIX] Fix panic when aggregator param is not a literal. 
#5290 + ## 2.7.2 / 2019-03-02 * [BUGFIX] `prometheus_rule_group_last_evaluation_timestamp_seconds` is now a unix timestamp. #5186 @@ -12,7 +109,7 @@ This release has a fix for a Stored DOM XSS vulnerability that can be triggered ## 2.7.0 / 2019-01-28 -We're rolling back the Dockerfile changes introduced in 2.6.0. If you made changes to your docker deployment in 2.6.0, you will need to roll them back. This release also adds experimental support for disk size based retention. To accomodate that we are deprecating the flag `storage.tsdb.retention` in favour of `storage.tsdb.retention.time`. We print a warning if the flag is in use, but it will function without breaking until Prometheus 3.0. +We're rolling back the Dockerfile changes introduced in 2.6.0. If you made changes to your docker deployment in 2.6.0, you will need to roll them back. This release also adds experimental support for disk size based retention. To accommodate that we are deprecating the flag `storage.tsdb.retention` in favour of `storage.tsdb.retention.time`. We print a warning if the flag is in use, but it will function without breaking until Prometheus 3.0. * [CHANGE] Rollback Dockerfile to version at 2.5.0. Rollback of the breaking change introduced in 2.6.0. #5122 * [FEATURE] Add subqueries to PromQL. #4831 @@ -139,8 +236,8 @@ This release includes multiple bugfixes and features. Further, the WAL implement * [FEATURE] Persist alert 'for' state across restarts #4061 * [FEATURE] Add API providing per target metric metadata #4183 * [FEATURE] Add API providing recording and alerting rules #4318 #4501 -* [ENHANCEMENT] Brand new WAL implementation for TSDB. Forwards incompatible with previous WAL. -* [ENHANCEMENT] Show rule evaluation errors in UI #4457 +* [ENHANCEMENT] Brand new WAL implementation for TSDB. Forwards incompatible with previous WAL. 
+* [ENHANCEMENT] Show rule evaluation errors in UI #4457 * [ENHANCEMENT] Throttle resends of alerts to Alertmanager #4538 * [ENHANCEMENT] Send EndsAt along with the alert to Alertmanager #4550 * [ENHANCEMENT] Limit the samples returned by remote read endpoint #4532 @@ -1203,7 +1300,7 @@ All changes: from embedding into the binary. Those files are only used for debugging, and then you can use -web.use-local-assets. By including fewer files, the RAM usage during compilation is much more manageable. -* [ENHANCEMENT] Help link points to http://prometheus.github.io now. +* [ENHANCEMENT] Help link points to https://prometheus.github.io now. * [FEATURE] Consoles for haproxy and cloudwatch. * [BUGFIX] Several fixes to graphs in consoles. * [CLEANUP] Removed a file size check that did not check anything. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fa44263b7bf..e2277a78880 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ Prometheus uses GitHub to manage reviews of pull requests. Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). + Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). * Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) @@ -40,6 +40,8 @@ go build ./cmd/prometheus/ make test # Make sure all the tests pass before you commit and push :) ``` +We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. 
+ All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions). ## Pull Request Checklist @@ -54,7 +56,7 @@ All our issues are regularly tagged so that you can also filter down the issues ## Dependency management -The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.11 or greater installed. +The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. All dependencies are vendored in the `vendor/` directory. diff --git a/Dockerfile b/Dockerfile index 1a26f3b8220..fc5ca5933df 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ LABEL io.k8s.display-name="OpenShift Prometheus" \ io.k8s.description="The Prometheus monitoring system and time series database." \ io.openshift.tags="prometheus,monitoring" \ maintainer="The Prometheus Authors " \ - version="v2.7.2" + version="v2.10.0" ARG FROM_DIRECTORY=/go/src/github.com/prometheus/prometheus COPY --from=builder ${FROM_DIRECTORY}/prometheus /bin/prometheus diff --git a/Makefile b/Makefile index 25d145968a5..6b13f9b67c3 100644 --- a/Makefile +++ b/Makefile @@ -11,18 +11,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-include Makefile.common +# Needs to be defined before including Makefile.common to auto-generate targets +DOCKER_ARCHS ?= amd64 armv7 arm64 -STATICCHECK_IGNORE = \ - github.com/prometheus/prometheus/pkg/textparse/promlex.l.go:SA4006 \ - github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go:SA4006 +include Makefile.common DOCKER_IMAGE_NAME ?= prometheus -# Go modules needs the bzr binary because of the dependency on launchpad.net/gocheck. -$(eval $(call PRECHECK_COMMAND_template,bzr)) -PRECHECK_OPTIONS_bzr = version - .PHONY: assets assets: @echo ">> writing assets" diff --git a/Makefile.common b/Makefile.common index 89249aa39b4..38066679aa0 100644 --- a/Makefile.common +++ b/Makefile.common @@ -36,7 +36,8 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -unexport GOVENDOR +GOVENDOR := +GO111MODULE := ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). @@ -57,15 +58,12 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$( # This repository isn't using Go modules (yet). GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif - - unexport GO111MODULE endif ifeq ($(BUILD_PROMU),false) PROMU := promu else PROMU := $(FIRST_GOPATH)/bin/promu endif -STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck pkgs = ./... 
ifeq (arm, $(GOHOSTARCH)) @@ -75,16 +73,31 @@ else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif -PROMU_VERSION ?= 0.2.0 +PROMU_VERSION ?= 0.4.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz -STATICCHECK_VERSION ?= 2019.1 -STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH) + +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.16.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif +endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_REPO ?= prom +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -92,14 +105,14 @@ ifeq ($(GOHOSTARCH),amd64) endif endif -.PHONY: all -all: precheck style staticcheck unused build test - # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. 
%: common-% ; +.PHONY: common-all +common-all: precheck style check_license lint unused build test + .PHONY: common-style common-style: @echo ">> checking code style" @@ -121,6 +134,15 @@ common-check_license: exit 1; \ fi +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" +ifdef GO111MODULE + GO111MODULE=$(GO111MODULE) $(GO) mod download +else + $(GO) get $(GOOPTS) -t ./... +endif + .PHONY: common-test-short common-test-short: @echo ">> running short tests" @@ -141,18 +163,23 @@ common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) -.PHONY: common-staticcheck -common-staticcheck: $(STATICCHECK) - @echo ">> running staticcheck" - chmod +x $(STATICCHECK) +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) else - $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + $(GOLANGCI_LINT) run $(pkgs) endif +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) @@ -183,17 +210,28 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) -.PHONY: common-docker -common-docker: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" . 
- -.PHONY: common-docker-publish -common-docker-publish: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" - -.PHONY: common-docker-tag-latest -common-docker-tag-latest: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest" +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + . + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .PHONY: promu ifeq ($(BUILD_PROMU),false) @@ -215,9 +253,11 @@ proto: @echo ">> generating code from proto files" @./scripts/genproto.sh -$(STATICCHECK): +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin - curl -s -L $(STATICCHECK_URL) > $(STATICCHECK) + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif ifdef GOVENDOR .PHONY: $(GOVENDOR) @@ -231,7 +271,6 @@ precheck:: define PRECHECK_COMMAND_template = 
precheck:: $(1)_precheck - PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: diff --git a/NOTICE b/NOTICE index 2e141355cf3..e36e57e5276 100644 --- a/NOTICE +++ b/NOTICE @@ -2,13 +2,13 @@ The Prometheus systems and service monitoring server Copyright 2012-2015 The Prometheus Authors This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). +SoundCloud Ltd. (https://soundcloud.com/). The following components are included in this product: Bootstrap -http://getbootstrap.com +https://getbootstrap.com Copyright 2011-2014 Twitter, Inc. Licensed under the MIT License @@ -52,7 +52,7 @@ Copyright jQuery Foundation and other contributors Licensed under the MIT License Protocol Buffers for Go with Gadgets -http://github.com/gogo/protobuf/ +https://github.com/gogo/protobuf/ Copyright (c) 2013, The GoGo Authors. See source code for license details. @@ -67,7 +67,7 @@ Copyright 2013 Matt T. Proud Licensed under the Apache License, Version 2.0 DNS library in Go -http://miek.nl/posts/2014/Aug/16/go-dns-package/ +https://miek.nl/2014/august/16/go-dns-package/ Copyright 2009 The Go Authors, 2011 Miek Gieben See https://github.com/miekg/dns/blob/master/LICENSE for license details. diff --git a/README.md b/README.md index fd20383d98b..908a2cccfb4 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ Prometheus will now be reachable at http://localhost:9090/. ### Building from source To build Prometheus from the source code yourself you need to have a working -Go environment with [version 1.11 or greater installed](http://golang.org/doc/install). +Go environment with [version 1.12 or greater installed](https://golang.org/doc/install). 
You can directly use the `go` tool to download and install the `prometheus` and `promtool` binaries into your `GOPATH`: @@ -87,7 +87,7 @@ The Makefile provides several targets: ## More information - * The source code is periodically indexed: [Prometheus Core](http://godoc.org/github.com/prometheus/prometheus). + * The source code is periodically indexed: [Prometheus Core](https://godoc.org/github.com/prometheus/prometheus). * You will find a Travis CI configuration in `.travis.yml`. * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels. diff --git a/RELEASE.md b/RELEASE.md index f889424f514..b75094b0d1b 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -6,14 +6,16 @@ This page describes the release process and the currently planned schedule for u Release cadence of first pre-releases being cut is 6 weeks. -| release series | date of first pre-release (year-month-day) | release shepherd | +| release series | date of first pre-release (year-month-day) | release shepherd | |----------------|--------------------------------------------|---------------------------------------------| | v2.4 | 2018-09-06 | Goutham Veeramachaneni (GitHub: @gouthamve) | | v2.5 | 2018-10-24 | Frederic Branczyk (GitHub: @brancz) | | v2.6 | 2018-12-05 | Simon Pasquier (GitHub: @simonpasquier) | | v2.7 | 2019-01-16 | Goutham Veeramachaneni (GitHub: @gouthamve) | -| v2.8 | 2019-02-27 | **searching for volunteer** | -| v2.9 | 2019-04-10 | **searching for volunteer** | +| v2.8 | 2019-02-27 | Ganesh Vernekar (GitHub: @codesome) | +| v2.9 | 2019-04-10 | Brian Brazil (GitHub: @brian-brazil) | +| v2.10 | 2019-05-22 | Björn Rabenstein (GitHub: @beorn7) | +| v2.11 | 2019-07-03 | **searching for volunteer** | If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your 
choice. @@ -26,6 +28,8 @@ The release shepherd is responsible for the entire release series of a minor rel * Once a pre-release has been released, the `master` branch of the repository is frozen for any feature work, only critical bug fix work concerning the minor release is merged. * Pre-releases are done from `master`, after pre-releases are promoted to the stable release a `release-major.minor` branch is created. +_Experimental change of the above procedure for the v2.10 release: The `release-2.10` branch is created at the time the first pre-release is cut. All releases of the series including pre-releases are cut from the `release-2.10` branch. `master` will not be frozen for feature work._ + See the next section for details on cutting an individual release. ## How to cut an individual release @@ -34,7 +38,7 @@ These instructions are currently valid for the Prometheus server, i.e. the [prom ### Branch management and versioning strategy -We use [Semantic Versioning](http://semver.org/). +We use [Semantic Versioning](https://semver.org/). We maintain a separate branch for each minor release, named `release-.`, e.g. `release-1.1`, `release-2.0`. @@ -90,13 +94,13 @@ If the release has happened in the latest release branch, merge the changes into To update the docs, a PR needs to be created to `prometheus/docs`. See [this PR](https://github.com/prometheus/docs/pull/952/files) for inspiration. -Once the binaries have been uploaded, announce the release on `prometheus-users@googlegroups.com`. Start the subject with `[ANN]`. Check out previous announcement mails for inspiration. +Once the binaries have been uploaded, announce the release on `prometheus-announce@googlegroups.com`. (Please do not use `prometheus-users@googlegroups.com` for announcements anymore.) Check out previous announcement mails for inspiration. 
### Pre-releases The following changes to the above procedures apply: -* In line with [Semantic Versioning](http://semver.org/), append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.). +* In line with [Semantic Versioning](https://semver.org/), append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.). * Tick the _This is a pre-release_ box when drafting the release in the Github UI. * Still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into the one final update. diff --git a/VERSION b/VERSION index 37c2961c243..10c2c0c3d62 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.7.2 +2.10.0 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index e1321b93ae6..5529f0dcb91 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -19,7 +19,6 @@ import ( "crypto/md5" "encoding/json" "fmt" - "math" "net" "net/http" _ "net/http/pprof" // Comment this line to disable pprof endpoint. 
@@ -36,23 +35,23 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/oklog/oklog/pkg/group" + conntrack "github.com/mwitkow/go-conntrack" + "github.com/oklog/run" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promlog" "github.com/prometheus/common/version" - prom_runtime "github.com/prometheus/prometheus/pkg/runtime" - "gopkg.in/alecthomas/kingpin.v2" + kingpin "gopkg.in/alecthomas/kingpin.v2" "k8s.io/klog" - "github.com/mwitkow/go-conntrack" - "github.com/prometheus/common/promlog" promlogflag "github.com/prometheus/common/promlog/flag" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" sd_config "github.com/prometheus/prometheus/discovery/config" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/pkg/relabel" + prom_runtime "github.com/prometheus/prometheus/pkg/runtime" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" @@ -172,6 +171,9 @@ func main() { a.Flag("web.page-title", "Document title of Prometheus instance."). Default("Prometheus Time Series Collection and Processing Server").StringVar(&cfg.web.PageTitle) + a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com'`). + Default(".*").StringVar(&cfg.corsRegexString) + a.Flag("storage.tsdb.path", "Base path for metrics storage."). Default("data/").StringVar(&cfg.localStoragePath) @@ -179,25 +181,28 @@ func main() { Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration) a.Flag("storage.tsdb.max-block-duration", - "Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period)."). + "Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)"). 
Hidden().PlaceHolder("").SetValue(&cfg.tsdb.MaxBlockDuration) a.Flag("storage.tsdb.wal-segment-size", - "Size at which to split the tsdb WAL segment files (e.g. 100MB)"). + "Size at which to split the tsdb WAL segment files. Example: 100MB"). Hidden().PlaceHolder("").BytesVar(&cfg.tsdb.WALSegmentSize) - a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead"). - Default(defaultRetentionString).SetValue(&oldFlagRetentionDuration) + a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead."). + SetValue(&oldFlagRetentionDuration) - a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. Overrides \"storage.tsdb.retention\" if this flag is set to anything other than default."). - Default(defaultRetentionString).SetValue(&newFlagRetentionDuration) + a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+"."). + SetValue(&newFlagRetentionDuration) a.Flag("storage.tsdb.retention.size", "[EXPERIMENTAL] Maximum number of bytes that can be stored for blocks. Units supported: KB, MB, GB, TB, PB. This flag is experimental and can be changed in future releases."). - Default("0").BytesVar(&cfg.tsdb.MaxBytes) + BytesVar(&cfg.tsdb.MaxBytes) a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory."). Default("false").BoolVar(&cfg.tsdb.NoLockfile) + a.Flag("storage.tsdb.allow-overlapping-blocks", "[EXPERIMENTAL] Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge."). 
+ Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks) + a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload."). Default("1m").PlaceHolder("").SetValue(&cfg.RemoteFlushDeadline) @@ -207,10 +212,10 @@ func main() { a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit."). Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit) - a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring 'for' state of alert."). + a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert."). Default("1h").SetValue(&cfg.outageTolerance) - a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored 'for' state. This is maintained only for alerts with configured 'for' time greater than grace period."). + a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period."). Default("10m").SetValue(&cfg.forGracePeriod) a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager."). @@ -222,7 +227,7 @@ func main() { a.Flag("alertmanager.timeout", "Timeout for sending alerts to Alertmanager."). Default("10s").SetValue(&cfg.notifierTimeout) - a.Flag("query.lookback-delta", "The delta difference allowed for retrieving metrics during expression evaluations."). + a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations."). Default("5m").SetValue(&cfg.lookbackDelta) a.Flag("query.timeout", "Maximum time a query may take before being aborted."). @@ -230,11 +235,9 @@ func main() { a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently."). 
Default("20").IntVar(&cfg.queryConcurrency) - a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they would load more samples than this into memory, so this also limits the number of samples a query can return."). - Default("50000000").IntVar(&cfg.queryMaxSamples) - a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Eg. 'https?://(domain1|domain2)\.com'`). - Default(".*").StringVar(&cfg.corsRegexString) + a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return."). + Default("50000000").IntVar(&cfg.queryMaxSamples) promlogflag.AddFlags(a, &cfg.promlogConfig) @@ -245,6 +248,8 @@ func main() { os.Exit(2) } + logger := promlog.New(&cfg.promlogConfig) + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) if err != nil { fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL)) @@ -265,36 +270,51 @@ func main() { // RoutePrefix must always be at least '/'. cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/") - cfg.tsdb.RetentionDuration = chooseRetention(oldFlagRetentionDuration, newFlagRetentionDuration) + { // Time retention settings. + if oldFlagRetentionDuration != 0 { + level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.") + cfg.tsdb.RetentionDuration = oldFlagRetentionDuration + } - // Check for overflows. This limits our max retention to ~292.5y. - if cfg.tsdb.RetentionDuration < 0 { - cfg.tsdb.RetentionDuration = math.MaxInt64 - } + // When the new flag is set it takes precedence. 
+ if newFlagRetentionDuration != 0 { + cfg.tsdb.RetentionDuration = newFlagRetentionDuration + } - if cfg.tsdb.MaxBlockDuration == 0 { - cfg.tsdb.MaxBlockDuration = cfg.tsdb.RetentionDuration / 10 + if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { + cfg.tsdb.RetentionDuration = defaultRetentionDuration + level.Info(logger).Log("msg", "no time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) + } - // Prevent blocks from getting too big. - monthLong, err := model.ParseDuration("31d") - if err != nil { - panic(err) + // Check for overflows. This limits our max retention to 100y. + if cfg.tsdb.RetentionDuration < 0 { + y, err := model.ParseDuration("100y") + if err != nil { + panic(err) + } + cfg.tsdb.RetentionDuration = y + level.Warn(logger).Log("msg", "time retention value is too high. Limiting to: "+y.String()) } + } - if cfg.tsdb.MaxBlockDuration > monthLong { - cfg.tsdb.MaxBlockDuration = monthLong + { // Max block size settings. + if cfg.tsdb.MaxBlockDuration == 0 { + maxBlockDuration, err := model.ParseDuration("31d") + if err != nil { + panic(err) + } + // When the time retention is set and not too big use to define the max block duration. + if cfg.tsdb.RetentionDuration != 0 && cfg.tsdb.RetentionDuration/10 < maxBlockDuration { + maxBlockDuration = cfg.tsdb.RetentionDuration / 10 + } + + cfg.tsdb.MaxBlockDuration = maxBlockDuration } } promql.LookbackDelta = time.Duration(cfg.lookbackDelta) promql.SetDefaultEvaluationInterval(time.Duration(config.DefaultGlobalConfig.EvaluationInterval)) - logger := promlog.New(&cfg.promlogConfig) - - if oldFlagRetentionDuration != defaultRetentionDuration { - level.Warn(logger).Log("deprecation_notice", `"storage.tsdb.retention" flag is deprecated use "storage.tsdb.retention.time" instead.`) - } - // Above level 6, the k8s client would log bearer tokens in clear-text. 
klog.ClampLevel(6) klog.SetLogger(log.With(logger, "component", "k8s_client_runtime")) @@ -307,7 +327,7 @@ func main() { var ( localStorage = &tsdb.ReadyStorage{} - remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), localStorage.StartTime, time.Duration(cfg.RemoteFlushDeadline)) + remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, cfg.localStoragePath, time.Duration(cfg.RemoteFlushDeadline)) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) ) @@ -356,6 +376,7 @@ func main() { cfg.web.ScrapeManager = scrapeManager cfg.web.RuleManager = ruleManager cfg.web.Notifier = notifierManager + cfg.web.TSDBCfg = cfg.tsdb cfg.web.Version = &web.PrometheusVersion{ Version: version.Version, @@ -391,7 +412,6 @@ func main() { webHandler.ApplyConfig, // The Scrape and notifier managers need to reload before the Discovery manager as // they need to read the most updated config when receiving the new targets list. - notifierManager.ApplyConfig, scrapeManager.ApplyConfig, func(cfg *config.Config) error { c := make(map[string]sd_config.ServiceDiscoveryConfig) @@ -400,6 +420,7 @@ func main() { } return discoveryManagerScrape.ApplyConfig(c) }, + notifierManager.ApplyConfig, func(cfg *config.Config) error { c := make(map[string]sd_config.ServiceDiscoveryConfig) for _, v := range cfg.AlertingConfig.AlertmanagerConfigs { @@ -419,11 +440,15 @@ func main() { fs, err := filepath.Glob(pat) if err != nil { // The only error can be a bad pattern. - return fmt.Errorf("error retrieving rule files for %s: %s", pat, err) + return errors.Wrapf(err, "error retrieving rule files for %s", pat) } files = append(files, fs...) 
} - return ruleManager.Update(time.Duration(cfg.GlobalConfig.EvaluationInterval), files) + return ruleManager.Update( + time.Duration(cfg.GlobalConfig.EvaluationInterval), + files, + cfg.GlobalConfig.ExternalLabels, + ) }, } @@ -450,7 +475,7 @@ func main() { }) } - var g group.Group + var g run.Group { // Termination handler. term := make(chan os.Signal, 1) @@ -580,7 +605,7 @@ func main() { } if err := reloadConfig(cfg.configFile, logger, reloaders...); err != nil { - return fmt.Errorf("error loading config from %q: %s", cfg.configFile, err) + return errors.Wrapf(err, "error loading config from %q", cfg.configFile) } reloadReady.Close() @@ -630,9 +655,19 @@ func main() { &cfg.tsdb, ) if err != nil { - return fmt.Errorf("opening storage failed: %s", err) + return errors.Wrapf(err, "opening storage failed") } + level.Info(logger).Log("fs_type", prom_runtime.Statfs(cfg.localStoragePath)) level.Info(logger).Log("msg", "TSDB started") + level.Debug(logger).Log("msg", "TSDB options", + "MinBlockDuration", cfg.tsdb.MinBlockDuration, + "MaxBlockDuration", cfg.tsdb.MaxBlockDuration, + "MaxBytes", cfg.tsdb.MaxBytes, + "NoLockfile", cfg.tsdb.NoLockfile, + "RetentionDuration", cfg.tsdb.RetentionDuration, + "WALSegmentSize", cfg.tsdb.WALSegmentSize, + "AllowOverlappingBlocks", cfg.tsdb.AllowOverlappingBlocks, + ) startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000) localStorage.Set(db, startTimeMargin) @@ -653,7 +688,7 @@ func main() { g.Add( func() error { if err := webHandler.Run(ctxWeb); err != nil { - return fmt.Errorf("error starting web server: %s", err) + return errors.Wrapf(err, "error starting web server") } return nil }, @@ -705,7 +740,7 @@ func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config conf, err := config.LoadFile(filename) if err != nil { - return fmt.Errorf("couldn't load configuration (--config.file=%q): %v", filename, err) + return errors.Wrapf(err, "couldn't load configuration 
(--config.file=%q)", filename) } failed := false @@ -716,8 +751,9 @@ func reloadConfig(filename string, logger log.Logger, rls ...func(*config.Config } } if failed { - return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) + return errors.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) } + promql.SetDefaultEvaluationInterval(time.Duration(conf.GlobalConfig.EvaluationInterval)) level.Info(logger).Log("msg", "Completed loading of configuration file", "filename", filename) return nil @@ -753,7 +789,7 @@ func computeExternalURL(u, listenAddr string) (*url.URL, error) { } if startsOrEndsWithQuote(u) { - return nil, fmt.Errorf("URL must not begin or end with quotes") + return nil, errors.New("URL must not begin or end with quotes") } eu, err := url.Parse(u) @@ -799,19 +835,3 @@ func sendAlerts(s sender, externalURL string) rules.NotifyFunc { } } } - -// chooseRetention is some roundabout code to support both RetentionDuration and Retention (for different flags). -// If Retention is 15d, then it means that the default value is set and the value of RetentionDuration is used. -func chooseRetention(oldFlagDuration, newFlagDuration model.Duration) model.Duration { - retention := oldFlagDuration - if retention == defaultRetentionDuration { - retention = newFlagDuration - } - - // Further newFlag takes precedence if it's set to anything other than default. 
- if newFlagDuration != defaultRetentionDuration { - retention = newFlagDuration - } - - return retention -} diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 0671d759ec6..e82e55bc32b 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -25,7 +25,6 @@ import ( "testing" "time" - "github.com/prometheus/common/model" "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/rules" @@ -285,31 +284,3 @@ func TestWALSegmentSizeBounds(t *testing.T) { } } } - -func TestChooseRetention(t *testing.T) { - retention1, err := model.ParseDuration("20d") - testutil.Ok(t, err) - retention2, err := model.ParseDuration("30d") - testutil.Ok(t, err) - - cases := []struct { - oldFlagRetention model.Duration - newFlagRetention model.Duration - - chosen model.Duration - }{ - // Both are default (unset flags). - {defaultRetentionDuration, defaultRetentionDuration, defaultRetentionDuration}, - // Old flag is set and new flag is unset. - {retention1, defaultRetentionDuration, retention1}, - // Old flag is unset and new flag is set. - {defaultRetentionDuration, retention2, retention2}, - // Both flags are set. 
- {retention1, retention2, retention2}, - } - - for _, tc := range cases { - retention := chooseRetention(tc.oldFlagRetention, tc.newFlagRetention) - testutil.Equals(t, tc.chosen, retention) - } -} diff --git a/cmd/promtool/archive.go b/cmd/promtool/archive.go index dc375610d6b..783d8294f46 100644 --- a/cmd/promtool/archive.go +++ b/cmd/promtool/archive.go @@ -16,8 +16,9 @@ package main import ( "archive/tar" "compress/gzip" - "fmt" "os" + + "github.com/pkg/errors" ) const filePerm = 0644 @@ -31,7 +32,7 @@ type tarGzFileWriter struct { func newTarGzFileWriter(archiveName string) (*tarGzFileWriter, error) { file, err := os.Create(archiveName) if err != nil { - return nil, fmt.Errorf("error creating archive %q: %s", archiveName, err) + return nil, errors.Wrapf(err, "error creating archive %q", archiveName) } gzw := gzip.NewWriter(file) tw := tar.NewWriter(gzw) diff --git a/cmd/promtool/http.go b/cmd/promtool/http.go deleted file mode 100644 index a0bf34d35cd..00000000000 --- a/cmd/promtool/http.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/prometheus/client_golang/api" -) - -const defaultTimeout = 2 * time.Minute - -type prometheusHTTPClient struct { - requestTimeout time.Duration - httpClient api.Client -} - -func newPrometheusHTTPClient(serverURL string) (*prometheusHTTPClient, error) { - hc, err := api.NewClient(api.Config{ - Address: serverURL, - }) - if err != nil { - return nil, fmt.Errorf("error creating HTTP client: %s", err) - } - return &prometheusHTTPClient{ - requestTimeout: defaultTimeout, - httpClient: hc, - }, nil -} - -func (c *prometheusHTTPClient) do(req *http.Request) (*http.Response, []byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout) - defer cancel() - return c.httpClient.Do(ctx, req) -} - -func (c *prometheusHTTPClient) urlJoin(path string) string { - return c.httpClient.URL(path, nil).String() -} diff --git a/cmd/promtool/http_test.go b/cmd/promtool/http_test.go deleted file mode 100644 index b9783daaf5b..00000000000 --- a/cmd/promtool/http_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import "testing" - -func TestURLJoin(t *testing.T) { - - testCases := []struct { - inputHost string - inputPath string - expected string - }{ - {"http://host", "path", "http://host/path"}, - {"http://host", "path/", "http://host/path"}, - {"http://host", "/path", "http://host/path"}, - {"http://host", "/path/", "http://host/path"}, - - {"http://host/", "path", "http://host/path"}, - {"http://host/", "path/", "http://host/path"}, - {"http://host/", "/path", "http://host/path"}, - {"http://host/", "/path/", "http://host/path"}, - - {"https://host", "path", "https://host/path"}, - {"https://host", "path/", "https://host/path"}, - {"https://host", "/path", "https://host/path"}, - {"https://host", "/path/", "https://host/path"}, - - {"https://host/", "path", "https://host/path"}, - {"https://host/", "path/", "https://host/path"}, - {"https://host/", "/path", "https://host/path"}, - {"https://host/", "/path/", "https://host/path"}, - } - for i, c := range testCases { - client, err := newPrometheusHTTPClient(c.inputHost) - if err != nil { - panic(err) - } - actual := client.urlJoin(c.inputPath) - if actual != c.expected { - t.Errorf("Error on case %d: %v(actual) != %v(expected)", i, actual, c.expected) - } - t.Logf("Case %d: %v(actual) == %v(expected)", i, actual, c.expected) - } -} diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 7a9192ea621..51d5a6ca623 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -26,15 +26,15 @@ import ( "strings" "time" - "gopkg.in/alecthomas/kingpin.v2" - "github.com/google/pprof/profile" "github.com/pkg/errors" "github.com/prometheus/client_golang/api" - "github.com/prometheus/client_golang/api/prometheus/v1" + v1 "github.com/prometheus/client_golang/api/prometheus/v1" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" + kingpin "gopkg.in/alecthomas/kingpin.v2" + "github.com/prometheus/prometheus/config" 
"github.com/prometheus/prometheus/pkg/rulefmt" "github.com/prometheus/prometheus/util/promlint" @@ -202,10 +202,10 @@ func checkConfig(filename string) ([]string, error) { // If an explicit file was given, error if it is not accessible. if !strings.Contains(rf, "*") { if len(rfs) == 0 { - return nil, fmt.Errorf("%q does not point to an existing file", rf) + return nil, errors.Errorf("%q does not point to an existing file", rf) } if err := checkFileExists(rfs[0]); err != nil { - return nil, fmt.Errorf("error checking rule file %q: %s", rfs[0], err) + return nil, errors.Wrapf(err, "error checking rule file %q", rfs[0]) } } ruleFiles = append(ruleFiles, rfs...) @@ -213,7 +213,7 @@ func checkConfig(filename string) ([]string, error) { for _, scfg := range cfg.ScrapeConfigs { if err := checkFileExists(scfg.HTTPClientConfig.BearerTokenFile); err != nil { - return nil, fmt.Errorf("error checking bearer token file %q: %s", scfg.HTTPClientConfig.BearerTokenFile, err) + return nil, errors.Wrapf(err, "error checking bearer token file %q", scfg.HTTPClientConfig.BearerTokenFile) } if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig); err != nil { @@ -221,7 +221,7 @@ func checkConfig(filename string) ([]string, error) { } for _, kd := range scfg.ServiceDiscoveryConfig.KubernetesSDConfigs { - if err := checkTLSConfig(kd.TLSConfig); err != nil { + if err := checkTLSConfig(kd.HTTPClientConfig.TLSConfig); err != nil { return nil, err } } @@ -247,17 +247,17 @@ func checkConfig(filename string) ([]string, error) { func checkTLSConfig(tlsConfig config_util.TLSConfig) error { if err := checkFileExists(tlsConfig.CertFile); err != nil { - return fmt.Errorf("error checking client cert file %q: %s", tlsConfig.CertFile, err) + return errors.Wrapf(err, "error checking client cert file %q", tlsConfig.CertFile) } if err := checkFileExists(tlsConfig.KeyFile); err != nil { - return fmt.Errorf("error checking client key file %q: %s", tlsConfig.KeyFile, err) + return errors.Wrapf(err, "error 
checking client key file %q", tlsConfig.KeyFile) } if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 { - return fmt.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile) + return errors.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile) } if len(tlsConfig.KeyFile) > 0 && len(tlsConfig.CertFile) == 0 { - return fmt.Errorf("client key file %q specified without client cert file", tlsConfig.KeyFile) + return errors.Errorf("client key file %q specified without client cert file", tlsConfig.KeyFile) } return nil @@ -510,7 +510,7 @@ func parseTime(s string) (time.Time, error) { if t, err := time.Parse(time.RFC3339Nano, s); err == nil { return t, nil } - return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s) + return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s) } type endpointsGroup struct { @@ -619,11 +619,14 @@ func (p *promqlPrinter) printLabelValues(val model.LabelValues) { type jsonPrinter struct{} func (j *jsonPrinter) printValue(v model.Value) { + //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } func (j *jsonPrinter) printSeries(v []model.LabelSet) { + //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } func (j *jsonPrinter) printLabelValues(v model.LabelValues) { + //nolint:errcheck json.NewEncoder(os.Stdout).Encode(v) } diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go index 8824fba7bea..84ff006db44 100644 --- a/cmd/promtool/main_test.go +++ b/cmd/promtool/main_test.go @@ -17,26 +17,27 @@ import ( "fmt" "net/http" "net/http/httptest" - "net/url" "testing" "time" ) func TestQueryRange(t *testing.T) { - s, getURL := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`) + s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`) defer s.Close() p := &promqlPrinter{} exitCode := QueryRange(s.URL, "up", "0", "300", 0, p) expectedPath := 
"/api/v1/query_range" - if getURL().Path != expectedPath { - t.Errorf("unexpected URL path %s (wanted %s)", getURL().Path, expectedPath) + gotPath := getRequest().URL.Path + if gotPath != expectedPath { + t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath) } - actual := getURL().Query().Get("query") + form := getRequest().Form + actual := form.Get("query") if actual != "up" { t.Errorf("unexpected value %s for query", actual) } - actual = getURL().Query().Get("step") + actual = form.Get("step") if actual != "1.000" { t.Errorf("unexpected value %s for step", actual) } @@ -45,14 +46,16 @@ func TestQueryRange(t *testing.T) { } exitCode = QueryRange(s.URL, "up", "0", "300", 10*time.Millisecond, p) - if getURL().Path != expectedPath { - t.Errorf("unexpected URL path %s (wanted %s)", getURL().Path, expectedPath) + gotPath = getRequest().URL.Path + if gotPath != expectedPath { + t.Errorf("unexpected URL path %s (wanted %s)", gotPath, expectedPath) } - actual = getURL().Query().Get("query") + form = getRequest().Form + actual = form.Get("query") if actual != "up" { t.Errorf("unexpected value %s for query", actual) } - actual = getURL().Query().Get("step") + actual = form.Get("step") if actual != "0.010" { t.Errorf("unexpected value %s for step", actual) } @@ -61,16 +64,17 @@ func TestQueryRange(t *testing.T) { } } -func mockServer(code int, body string) (*httptest.Server, func() *url.URL) { - var u *url.URL +func mockServer(code int, body string) (*httptest.Server, func() *http.Request) { + var req *http.Request server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u = r.URL + r.ParseForm() + req = r w.WriteHeader(code) fmt.Fprintln(w, body) })) - f := func() *url.URL { - return u + f := func() *http.Request { + return req } return server, f } diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index 7c0599cd840..9d72bd39a20 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -18,13 
+18,16 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" "reflect" "sort" "strconv" "strings" "time" - "gopkg.in/yaml.v2" + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" @@ -67,6 +70,7 @@ func ruleUnitTest(filename string) []error { if err := yaml.UnmarshalStrict(b, &unitTestInp); err != nil { return []error{err} } + resolveFilepaths(filepath.Dir(filename), &unitTestInp) if unitTestInp.EvaluationInterval == 0 { unitTestInp.EvaluationInterval = 1 * time.Minute @@ -84,7 +88,7 @@ func ruleUnitTest(filename string) []error { groupOrderMap := make(map[string]int) for i, gn := range unitTestInp.GroupEvalOrder { if _, ok := groupOrderMap[gn]; ok { - return []error{fmt.Errorf("group name repeated in evaluation order: %s", gn)} + return []error{errors.Errorf("group name repeated in evaluation order: %s", gn)} } groupOrderMap[gn] = i } @@ -124,6 +128,16 @@ func (utf *unitTestFile) maxEvalTime() time.Duration { return maxd } +// resolveFilepaths joins all relative paths in a configuration +// with a given base directory. +func resolveFilepaths(baseDir string, utf *unitTestFile) { + for i, rf := range utf.RuleFiles { + if rf != "" && !filepath.IsAbs(rf) { + utf.RuleFiles[i] = filepath.Join(baseDir, rf) + } + } +} + // testGroup is a group of input series and tests associated with it. type testGroup struct { Interval time.Duration `yaml:"interval"` @@ -147,10 +161,11 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou Appendable: suite.Storage(), Context: context.Background(), NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, - Logger: &dummyLogger{}, + Logger: log.NewNopLogger(), } m := rules.NewManager(opts) - groupsMap, ers := m.LoadGroups(tg.Interval, ruleFiles...) + // TODO(beorn7): Provide a way to pass in external labels. + groupsMap, ers := m.LoadGroups(tg.Interval, nil, ruleFiles...) 
if ers != nil { return ers } @@ -197,6 +212,12 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou } for _, g := range groups { g.Eval(suite.Context(), ts) + for _, r := range g.Rules() { + if r.LastError() != nil { + errs = append(errs, errors.Errorf(" rule: %s, time: %s, err: %v", + r.Name(), ts.Sub(time.Unix(0, 0)), r.LastError())) + } + } } }) if len(errs) > 0 { @@ -261,14 +282,14 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou } if gotAlerts.Len() != expAlerts.Len() { - errs = append(errs, fmt.Errorf(" alertname:%s, time:%s, \n exp:%#v, \n got:%#v", + errs = append(errs, errors.Errorf(" alertname:%s, time:%s, \n exp:%#v, \n got:%#v", testcase.Alertname, testcase.EvalTime.String(), expAlerts.String(), gotAlerts.String())) } else { sort.Sort(gotAlerts) sort.Sort(expAlerts) if !reflect.DeepEqual(expAlerts, gotAlerts) { - errs = append(errs, fmt.Errorf(" alertname:%s, time:%s, \n exp:%#v, \n got:%#v", + errs = append(errs, errors.Errorf(" alertname:%s, time:%s, \n exp:%#v, \n got:%#v", testcase.Alertname, testcase.EvalTime.String(), expAlerts.String(), gotAlerts.String())) } } @@ -284,7 +305,7 @@ Outer: got, err := query(suite.Context(), testCase.Expr, mint.Add(testCase.EvalTime), suite.QueryEngine(), suite.Queryable()) if err != nil { - errs = append(errs, fmt.Errorf(" expr:'%s', time:%s, err:%s", testCase.Expr, + errs = append(errs, errors.Errorf(" expr:'%s', time:%s, err:%s", testCase.Expr, testCase.EvalTime.String(), err.Error())) continue } @@ -301,7 +322,7 @@ Outer: for _, s := range testCase.ExpSamples { lb, err := promql.ParseMetric(s.Labels) if err != nil { - errs = append(errs, fmt.Errorf(" expr:'%s', time:%s, err:%s", testCase.Expr, + errs = append(errs, errors.Errorf(" expr:'%s', time:%s, err:%s", testCase.Expr, testCase.EvalTime.String(), err.Error())) continue Outer } @@ -318,7 +339,7 @@ Outer: return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0 }) if 
!reflect.DeepEqual(expSamples, gotSamples) { - errs = append(errs, fmt.Errorf(" expr:'%s', time:%s, \n exp:%#v, \n got:%#v", testCase.Expr, + errs = append(errs, errors.Errorf(" expr:'%s', time:%s, \n exp:%#v, \n got:%#v", testCase.Expr, testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples))) } } @@ -397,7 +418,7 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q Metric: labels.Labels{}, }}, nil default: - return nil, fmt.Errorf("rule result is not a vector or scalar") + return nil, errors.New("rule result is not a vector or scalar") } } @@ -482,9 +503,3 @@ func parsedSamplesString(pss []parsedSample) string { func (ps *parsedSample) String() string { return ps.Labels.String() + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64) } - -type dummyLogger struct{} - -func (l *dummyLogger) Log(keyvals ...interface{}) error { - return nil -} diff --git a/config/config.go b/config/config.go index d12c8d651d1..f4d8d470de9 100644 --- a/config/config.go +++ b/config/config.go @@ -22,12 +22,14 @@ import ( "strings" "time" - "github.com/prometheus/prometheus/pkg/relabel" - + "github.com/pkg/errors" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + yaml "gopkg.in/yaml.v2" + sd_config "github.com/prometheus/prometheus/discovery/config" - "gopkg.in/yaml.v2" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/relabel" ) var ( @@ -58,7 +60,7 @@ func LoadFile(filename string) (*Config, error) { } cfg, err := Load(string(content)) if err != nil { - return nil, fmt.Errorf("parsing YAML file %s: %v", filename, err) + return nil, errors.Wrapf(err, "parsing YAML file %s", filename) } resolveFilepaths(filepath.Dir(filename), cfg) return cfg, nil @@ -82,9 +84,10 @@ var ( DefaultScrapeConfig = ScrapeConfig{ // ScrapeTimeout and ScrapeInterval default to the // configured globals. 
- MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -107,9 +110,10 @@ var ( MinShards: 1, MaxSamplesPerSend: 100, - // By default, buffer 100 batches, which at 100ms per batch is 10s. At - // 1000 shards, this will buffer 10M samples total. - Capacity: 100 * 100, + // Each shard will have a max of 10 samples pending in it's channel, plus the pending + // samples that have been enqueued. Theoretically we should only ever have about 110 samples + // per shard pending. At 1000 shards that's 110k. + Capacity: 10, BatchSendDeadline: model.Duration(5 * time.Second), // Max number of times to retry a batch on recoverable errors. @@ -152,30 +156,34 @@ func resolveFilepaths(baseDir string, cfg *Config) { cfg.RuleFiles[i] = join(rf) } + tlsPaths := func(cfg *config_util.TLSConfig) { + cfg.CAFile = join(cfg.CAFile) + cfg.CertFile = join(cfg.CertFile) + cfg.KeyFile = join(cfg.KeyFile) + } clientPaths := func(scfg *config_util.HTTPClientConfig) { + if scfg.BasicAuth != nil { + scfg.BasicAuth.PasswordFile = join(scfg.BasicAuth.PasswordFile) + } scfg.BearerTokenFile = join(scfg.BearerTokenFile) - scfg.TLSConfig.CAFile = join(scfg.TLSConfig.CAFile) - scfg.TLSConfig.CertFile = join(scfg.TLSConfig.CertFile) - scfg.TLSConfig.KeyFile = join(scfg.TLSConfig.KeyFile) + tlsPaths(&scfg.TLSConfig) } sdPaths := func(cfg *sd_config.ServiceDiscoveryConfig) { for _, kcfg := range cfg.KubernetesSDConfigs { - kcfg.BearerTokenFile = join(kcfg.BearerTokenFile) - kcfg.TLSConfig.CAFile = join(kcfg.TLSConfig.CAFile) - kcfg.TLSConfig.CertFile = join(kcfg.TLSConfig.CertFile) - kcfg.TLSConfig.KeyFile = join(kcfg.TLSConfig.KeyFile) + clientPaths(&kcfg.HTTPClientConfig) } for _, mcfg := range cfg.MarathonSDConfigs { mcfg.AuthTokenFile = join(mcfg.AuthTokenFile) - mcfg.HTTPClientConfig.BearerTokenFile = 
join(mcfg.HTTPClientConfig.BearerTokenFile) - mcfg.HTTPClientConfig.TLSConfig.CAFile = join(mcfg.HTTPClientConfig.TLSConfig.CAFile) - mcfg.HTTPClientConfig.TLSConfig.CertFile = join(mcfg.HTTPClientConfig.TLSConfig.CertFile) - mcfg.HTTPClientConfig.TLSConfig.KeyFile = join(mcfg.HTTPClientConfig.TLSConfig.KeyFile) + clientPaths(&mcfg.HTTPClientConfig) } for _, consulcfg := range cfg.ConsulSDConfigs { - consulcfg.TLSConfig.CAFile = join(consulcfg.TLSConfig.CAFile) - consulcfg.TLSConfig.CertFile = join(consulcfg.TLSConfig.CertFile) - consulcfg.TLSConfig.KeyFile = join(consulcfg.TLSConfig.KeyFile) + tlsPaths(&consulcfg.TLSConfig) + } + for _, cfg := range cfg.OpenstackSDConfigs { + tlsPaths(&cfg.TLSConfig) + } + for _, cfg := range cfg.TritonSDConfigs { + tlsPaths(&cfg.TLSConfig) } for _, filecfg := range cfg.FileSDConfigs { for i, fn := range filecfg.Files { @@ -192,6 +200,12 @@ func resolveFilepaths(baseDir string, cfg *Config) { clientPaths(&cfg.HTTPClientConfig) sdPaths(&cfg.ServiceDiscoveryConfig) } + for _, cfg := range cfg.RemoteReadConfigs { + clientPaths(&cfg.HTTPClientConfig) + } + for _, cfg := range cfg.RemoteWriteConfigs { + clientPaths(&cfg.HTTPClientConfig) + } } func (c Config) String() string { @@ -221,14 +235,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { for _, rf := range c.RuleFiles { if !patRulePath.MatchString(rf) { - return fmt.Errorf("invalid rule file path %q", rf) + return errors.Errorf("invalid rule file path %q", rf) } } // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { if scfg == nil { - return fmt.Errorf("empty or null scrape config section") + return errors.New("empty or null scrape config section") } // First set the correct scrape interval, then check that the timeout // (inferred or explicit) is not greater than that. 
@@ -236,7 +250,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval } if scfg.ScrapeTimeout > scfg.ScrapeInterval { - return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) + return errors.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) } if scfg.ScrapeTimeout == 0 { if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { @@ -247,18 +261,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } if _, ok := jobNames[scfg.JobName]; ok { - return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) + return errors.Errorf("found multiple scrape configs with job name %q", scfg.JobName) } jobNames[scfg.JobName] = struct{}{} } for _, rwcfg := range c.RemoteWriteConfigs { if rwcfg == nil { - return fmt.Errorf("empty or null remote write config section") + return errors.New("empty or null remote write config section") } } for _, rrcfg := range c.RemoteReadConfigs { if rrcfg == nil { - return fmt.Errorf("empty or null remote read config section") + return errors.New("empty or null remote read config section") } } return nil @@ -274,7 +288,7 @@ type GlobalConfig struct { // How frequently to evaluate rules by default. EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. - ExternalLabels model.LabelSet `yaml:"external_labels,omitempty"` + ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
@@ -287,13 +301,22 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } + for _, l := range gc.ExternalLabels { + if !model.LabelName(l.Name).IsValid() { + return errors.Errorf("%q is not a valid label name", l.Name) + } + if !model.LabelValue(l.Value).IsValid() { + return errors.Errorf("%q is not a valid label value", l.Value) + } + } + // First set the correct scrape interval, then check that the timeout // (inferred or explicit) is not greater than that. if gc.ScrapeInterval == 0 { gc.ScrapeInterval = DefaultGlobalConfig.ScrapeInterval } if gc.ScrapeTimeout > gc.ScrapeInterval { - return fmt.Errorf("global scrape timeout greater than scrape interval") + return errors.New("global scrape timeout greater than scrape interval") } if gc.ScrapeTimeout == 0 { if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval { @@ -323,6 +346,8 @@ type ScrapeConfig struct { JobName string `yaml:"job_name"` // Indicator whether the scraped metrics should remain unmodified. HonorLabels bool `yaml:"honor_labels,omitempty"` + // Indicator whether the scraped timestamps should be respected. + HonorTimestamps bool `yaml:"honor_timestamps"` // A set of query parameters with which the target is scraped. Params url.Values `yaml:"params,omitempty"` // How frequently to scrape the targets of this scrape config. @@ -357,7 +382,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if len(c.JobName) == 0 { - return fmt.Errorf("job_name is empty") + return errors.New("job_name is empty") } // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. 
@@ -387,12 +412,12 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { for _, rlcfg := range c.RelabelConfigs { if rlcfg == nil { - return fmt.Errorf("empty or null target relabeling rule in scrape config") + return errors.New("empty or null target relabeling rule in scrape config") } } for _, rlcfg := range c.MetricRelabelConfigs { if rlcfg == nil { - return fmt.Errorf("empty or null metric relabeling rule in scrape config") + return errors.New("empty or null metric relabeling rule in scrape config") } } @@ -423,7 +448,7 @@ func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error for _, rlcfg := range c.AlertRelabelConfigs { if rlcfg == nil { - return fmt.Errorf("empty or null alert relabeling rule") + return errors.New("empty or null alert relabeling rule") } } return nil @@ -483,7 +508,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er for _, rlcfg := range c.RelabelConfigs { if rlcfg == nil { - return fmt.Errorf("empty or null Alertmanager target relabeling rule") + return errors.New("empty or null Alertmanager target relabeling rule") } } @@ -500,7 +525,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er func CheckTargetAddress(address model.LabelValue) error { // For now check for a URL, we may want to expand this later. 
if strings.Contains(string(address), "/") { - return fmt.Errorf("%q is not a valid hostname", address) + return errors.Errorf("%q is not a valid hostname", address) } return nil } @@ -537,11 +562,11 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err return err } if c.URL == nil { - return fmt.Errorf("url for remote_write is empty") + return errors.New("url for remote_write is empty") } for _, rlcfg := range c.WriteRelabelConfigs { if rlcfg == nil { - return fmt.Errorf("empty or null relabeling rule in remote write config") + return errors.New("empty or null relabeling rule in remote write config") } } @@ -599,7 +624,7 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro return err } if c.URL == nil { - return fmt.Errorf("url for remote_read is empty") + return errors.New("url for remote_read is empty") } // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. // We cannot make it a pointer as the parser panics for inlined pointer structs. 
diff --git a/config/config_test.go b/config/config_test.go index 5aa860c65cb..9d35e9e5332 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -23,9 +23,13 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/pkg/relabel" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery/azure" + sd_config "github.com/prometheus/prometheus/discovery/config" "github.com/prometheus/prometheus/discovery/consul" "github.com/prometheus/prometheus/discovery/dns" "github.com/prometheus/prometheus/discovery/ec2" @@ -36,12 +40,9 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/triton" "github.com/prometheus/prometheus/discovery/zookeeper" - - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - sd_config "github.com/prometheus/prometheus/discovery/config" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/util/testutil" - "gopkg.in/yaml.v2" ) func mustParseURL(u string) *config_util.URL { @@ -58,9 +59,9 @@ var expectedConf = &Config{ ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, EvaluationInterval: model.Duration(30 * time.Second), - ExternalLabels: model.LabelSet{ - "monitor": "codelab", - "foo": "bar", + ExternalLabels: labels.Labels{ + {Name: "foo", Value: "bar"}, + {Name: "monitor", Value: "codelab"}, }, }, @@ -88,6 +89,12 @@ var expectedConf = &Config{ URL: mustParseURL("http://remote2/push"), RemoteTimeout: model.Duration(30 * time.Second), QueueConfig: DefaultQueueConfig, + HTTPClientConfig: config_util.HTTPClientConfig{ + TLSConfig: config_util.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, + }, }, }, @@ -102,6 +109,12 @@ var expectedConf = 
&Config{ RemoteTimeout: model.Duration(1 * time.Minute), ReadRecent: false, RequiredMatchers: model.LabelSet{"job": "special"}, + HTTPClientConfig: config_util.HTTPClientConfig{ + TLSConfig: config_util.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, + }, }, }, @@ -109,9 +122,10 @@ var expectedConf = &Config{ { JobName: "prometheus", - HonorLabels: true, - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorLabels: true, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -181,9 +195,10 @@ var expectedConf = &Config{ JobName: "service-x", - ScrapeInterval: model.Duration(50 * time.Second), - ScrapeTimeout: model.Duration(5 * time.Second), - SampleLimit: 1000, + HonorTimestamps: true, + ScrapeInterval: model.Duration(50 * time.Second), + ScrapeTimeout: model.Duration(5 * time.Second), + SampleLimit: 1000, HTTPClientConfig: config_util.HTTPClientConfig{ BasicAuth: &config_util.BasicAuth{ @@ -270,8 +285,9 @@ var expectedConf = &Config{ { JobName: "service-y", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -282,7 +298,7 @@ var expectedConf = &Config{ Server: "localhost:1234", Token: "mysecret", Services: []string{"nginx", "cache", "mysql"}, - ServiceTag: "canary", + ServiceTags: []string{"canary", "v1"}, NodeMeta: map[string]string{"rack": "123"}, TagSeparator: consul.DefaultSDConfig.TagSeparator, Scheme: "https", @@ -312,8 +328,9 @@ var expectedConf = &Config{ { JobName: "service-z", - ScrapeInterval: 
model.Duration(15 * time.Second), - ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), MetricsPath: "/metrics", Scheme: "http", @@ -330,8 +347,9 @@ var expectedConf = &Config{ { JobName: "service-kubernetes", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -341,9 +359,15 @@ var expectedConf = &Config{ { APIServer: kubernetesSDHostURL(), Role: kubernetes.RoleEndpoint, - BasicAuth: &config_util.BasicAuth{ - Username: "myusername", - Password: "mysecret", + HTTPClientConfig: config_util.HTTPClientConfig{ + BasicAuth: &config_util.BasicAuth{ + Username: "myusername", + Password: "mysecret", + }, + TLSConfig: config_util.TLSConfig{ + CertFile: filepath.FromSlash("testdata/valid_cert_file"), + KeyFile: filepath.FromSlash("testdata/valid_key_file"), + }, }, NamespaceDiscovery: kubernetes.NamespaceDiscovery{}, }, @@ -353,11 +377,18 @@ var expectedConf = &Config{ { JobName: "service-kubernetes-namespaces", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, + HTTPClientConfig: config_util.HTTPClientConfig{ + BasicAuth: &config_util.BasicAuth{ + Username: "myusername", + PasswordFile: filepath.FromSlash("testdata/valid_password_file"), + }, + }, ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ KubernetesSDConfigs: []*kubernetes.SDConfig{ @@ -376,8 +407,9 @@ var expectedConf = &Config{ { JobName: "service-marathon", - 
ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -403,8 +435,9 @@ var expectedConf = &Config{ { JobName: "service-ec2", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -435,8 +468,9 @@ var expectedConf = &Config{ { JobName: "service-azure", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -459,8 +493,9 @@ var expectedConf = &Config{ { JobName: "service-nerve", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -478,8 +513,9 @@ var expectedConf = &Config{ { JobName: "0123service-xxx", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -495,11 +531,33 @@ var expectedConf = &Config{ }, }, }, + { + JobName: "badfederation", + + HonorTimestamps: false, + 
ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + + MetricsPath: "/federate", + Scheme: DefaultScrapeConfig.Scheme, + + ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ + StaticConfigs: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + {model.AddressLabel: "localhost:9090"}, + }, + Source: "0", + }, + }, + }, + }, { JobName: "測試", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -518,8 +576,9 @@ var expectedConf = &Config{ { JobName: "service-triton", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -545,8 +604,9 @@ var expectedConf = &Config{ { JobName: "service-openstack", - ScrapeInterval: model.Duration(15 * time.Second), - ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, + HonorTimestamps: true, + ScrapeInterval: model.Duration(15 * time.Second), + ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout, MetricsPath: DefaultScrapeConfig.MetricsPath, Scheme: DefaultScrapeConfig.Scheme, @@ -559,9 +619,9 @@ var expectedConf = &Config{ Port: 80, RefreshInterval: model.Duration(60 * time.Second), TLSConfig: config_util.TLSConfig{ - CAFile: "valid_ca_file", - CertFile: "valid_cert_file", - KeyFile: "valid_key_file", + CAFile: "testdata/valid_ca_file", + CertFile: "testdata/valid_cert_file", + KeyFile: "testdata/valid_key_file", }, }, }, @@ -601,7 +661,7 @@ func TestLoadConfig(t *testing.T) { testutil.Ok(t, err) expectedConf.original = c.original - testutil.Equals(t, 
expectedConf, c) + assert.Equal(t, expectedConf, c) } // YAML marshaling must not reveal authentication credentials. @@ -630,6 +690,11 @@ func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { testutil.Equals(t, ruleFilesExpectedConf, c) } +func TestKubernetesEmptyAPIServer(t *testing.T) { + _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml") + testutil.Ok(t, err) +} + var expectedErrors = []struct { filename string errMsg string @@ -649,6 +714,9 @@ var expectedErrors = []struct { }, { filename: "labelname2.bad.yml", errMsg: `"not:allowed" is not a valid label name`, + }, { + filename: "labelvalue.bad.yml", + errMsg: `"\xff" is not a valid label value`, }, { filename: "regex.bad.yml", errMsg: "error parsing regexp", @@ -700,6 +768,9 @@ var expectedErrors = []struct { }, { filename: "bearertoken_basicauth.bad.yml", errMsg: "at most one of basic_auth, bearer_token & bearer_token_file must be configured", + }, { + filename: "kubernetes_http_config_without_api_server.bad.yml", + errMsg: "to use custom HTTP client configuration please provide the 'api_server' URL explicitly", }, { filename: "kubernetes_bearertoken.bad.yml", errMsg: "at most one of bearer_token & bearer_token_file must be configured", diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 2db750d10b7..6993217805d 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -19,6 +19,9 @@ remote_write: regex: expensive.* action: drop - url: http://remote2/push + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file remote_read: - url: http://remote1/read @@ -27,6 +30,9 @@ remote_read: read_recent: false required_matchers: job: special + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file scrape_configs: - job_name: prometheus @@ -122,7 +128,7 @@ scrape_configs: - server: 'localhost:1234' token: mysecret services: ['nginx', 'cache', 'mysql'] - tag: "canary" + tags: ["canary", "v1"] node_meta: rack: "123" 
allow_stale: true @@ -153,6 +159,9 @@ scrape_configs: kubernetes_sd_configs: - role: endpoints api_server: 'https://localhost:1234' + tls_config: + cert_file: valid_cert_file + key_file: valid_key_file basic_auth: username: 'myusername' @@ -167,6 +176,11 @@ scrape_configs: names: - default + basic_auth: + username: 'myusername' + password_file: valid_password_file + + - job_name: service-marathon marathon_sd_configs: - servers: @@ -216,6 +230,13 @@ scrape_configs: - targets: - localhost:9090 +- job_name: badfederation + honor_timestamps: false + metrics_path: /federate + static_configs: + - targets: + - localhost:9090 + - job_name: 測試 metrics_path: /metrics static_configs: @@ -231,8 +252,8 @@ scrape_configs: refresh_interval: 1m version: 1 tls_config: - cert_file: testdata/valid_cert_file - key_file: testdata/valid_key_file + cert_file: valid_cert_file + key_file: valid_key_file - job_name: service-openstack openstack_sd_configs: diff --git a/config/testdata/kubernetes_empty_apiserver.good.yml b/config/testdata/kubernetes_empty_apiserver.good.yml new file mode 100644 index 00000000000..12b428eb841 --- /dev/null +++ b/config/testdata/kubernetes_empty_apiserver.good.yml @@ -0,0 +1,4 @@ +scrape_configs: +- job_name: prometheus + kubernetes_sd_configs: + - role: endpoints diff --git a/config/testdata/kubernetes_http_config_without_api_server.bad.yml b/config/testdata/kubernetes_http_config_without_api_server.bad.yml new file mode 100644 index 00000000000..db442c3bd19 --- /dev/null +++ b/config/testdata/kubernetes_http_config_without_api_server.bad.yml @@ -0,0 +1,5 @@ +scrape_configs: + - job_name: prometheus + kubernetes_sd_configs: + - role: pod + bearer_token: 1234 diff --git a/config/testdata/labelvalue.bad.yml b/config/testdata/labelvalue.bad.yml new file mode 100644 index 00000000000..7873eb1743e --- /dev/null +++ b/config/testdata/labelvalue.bad.yml @@ -0,0 +1,3 @@ +global: + external_labels: + name: !!binary "/w==" \ No newline at end of file diff --git 
a/console_libraries/menu.lib b/console_libraries/menu.lib index 929dc362a3f..199ebf9f480 100644 --- a/console_libraries/menu.lib +++ b/console_libraries/menu.lib @@ -2,33 +2,37 @@ {{/* Navbar, should be passed . */}} {{ define "navbar" }} -