Merge branch 'master' into dev-v2

commit 7fdeda3bf5
5 .golangci.yml

@@ -0,0 +1,5 @@
+# Run only staticcheck for now. Additional linters will be enabled one-by-one.
+linters:
+  enable:
+  - staticcheck
+  disable-all: true
12 .travis.yml

@@ -1,13 +1,13 @@
-sudo: false
 language: go

 go:
-- 1.7.x # See README.md for current minimum version.
-- 1.8.x
-- 1.9.x
+- 1.9.x # See README.md for current minimum version.
 - 1.10.x
 - 1.11.x
+- 1.12.x

 script:
-- make check_license style unused test-short
-- if [[ ! $TRAVIS_GO_VERSION =~ ^1\.(7|8|9)\.[x0-9]+$ ]]; then make staticcheck; fi
+- make check_license unused test-short
+- if [[ ! $TRAVIS_GO_VERSION =~ ^1\.9\.[x0-9]+$ ]]; then make lint; fi
+# Style is only checked against the latest supported Go version.
+- if [[ $TRAVIS_GO_VERSION =~ ^1\.(12)\. ]]; then make style; fi
29 CHANGELOG.md

@@ -1,3 +1,32 @@
+## 0.9.3 / 2019-05-16
+* [CHANGE] Required Go version is now 1.9+. #561
+* [FEATURE] API client: Add POST with get fallback for Query/QueryRange. #557
+* [FEATURE] API client: Add alerts endpoint. #552
+* [FEATURE] API client: Add rules endpoint. #508
+* [FEATURE] push: Add option to pick metrics format. #540
+* [ENHANCEMENT] Limit time the Go collector may take to collect memstats,
+  returning results from the previous collection in case of a timeout. #568
+* [ENHANCEMENT] Pusher now requires only a thin interface instead of a full
+  `http.Client`, facilitating mocking and custom HTTP client implementation.
+  #559
+* [ENHANCEMENT] Memory usage improvement for histograms and summaries without
+  objectives. #536
+* [ENHANCEMENT] Summaries without objectives are now lock-free. #521
+* [BUGFIX] promhttp: `InstrumentRoundTripperTrace` now takes into account a pre-set context. #582
+* [BUGFIX] `TestCounterAddLarge` now works on all platforms. #567
+* [BUGFIX] Fix `promhttp` examples. #535 #544
+* [BUGFIX] API client: Wait for done before writing to shared response
+  body. #532
+* [BUGFIX] API client: Deal with discovered labels properly. #529
+
+## 0.9.2 / 2018-12-06
+* [FEATURE] Support for Go modules. #501
+* [FEATURE] `Timer.ObserveDuration` returns observed duration. #509
+* [ENHANCEMENT] Improved doc comments and error messages. #504
+* [BUGFIX] Fix race condition during metrics gathering. #512
+* [BUGFIX] Fix testutil metric comparison for Histograms and empty labels. #494
+  #498
+
 ## 0.9.1 / 2018-11-03
 * [FEATURE] Add `WriteToTextfile` function to facilitate the creation of
   *.prom files for the textfile collector of the node exporter. #489
9 Makefile

@@ -20,13 +20,8 @@ STATICCHECK_IGNORE = \
 github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go:SA1019 \
 github.com/prometheus/client_golang/prometheus/http.go:SA1019

-.PHONY: get_dep
-get_dep:
-    @echo ">> getting dependencies"
-    $(GO) get -t ./...
-
 .PHONY: test
-test: get_dep common-test
+test: deps common-test

 .PHONY: test-short
-test-short: get_dep common-test-short
+test-short: deps common-test-short
194 Makefile.common

@@ -16,7 +16,7 @@
 # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!

 # Example usage :
 # Create the main Makefile in the root project directory.
 # include Makefile.common
 # customTarget:
 # @echo ">> Running customTarget"

@@ -28,24 +28,87 @@ unexport GOBIN
 GO ?= go
 GOFMT ?= $(GO)fmt
 FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+GOOPTS ?=
+GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
+GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
+
+GO_VERSION ?= $(shell $(GO) version)
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
+
+GOVENDOR :=
+GO111MODULE :=
+ifeq (, $(PRE_GO_111))
+    ifneq (,$(wildcard go.mod))
+        # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
+        GO111MODULE := on
+
+        ifneq (,$(wildcard vendor))
+            # Always use the local vendor/ directory to satisfy the dependencies.
+            GOOPTS := $(GOOPTS) -mod=vendor
+        endif
+    endif
+else
+    ifneq (,$(wildcard go.mod))
+        ifneq (,$(wildcard vendor))
+$(warning This repository requires Go >= 1.11 because of Go modules)
+$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
+        endif
+    else
+        # This repository isn't using Go modules (yet).
+        GOVENDOR := $(FIRST_GOPATH)/bin/govendor
+    endif
+endif
 PROMU := $(FIRST_GOPATH)/bin/promu
-STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
-GOVENDOR := $(FIRST_GOPATH)/bin/govendor
 pkgs = ./...
+
+ifeq (arm, $(GOHOSTARCH))
+    GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
+    GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
+else
+    GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
+endif
+
+PROMU_VERSION ?= 0.4.0
+PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+GOLANGCI_LINT :=
+GOLANGCI_LINT_OPTS ?=
+GOLANGCI_LINT_VERSION ?= v1.16.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+# windows isn't included here because of the path separator being different.
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
+    ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+        GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+    endif
+endif
+
 PREFIX ?= $(shell pwd)
 BIN_DIR ?= $(shell pwd)
 DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
 DOCKER_REPO ?= prom

-.PHONY: all
-all: style staticcheck unused build test
+DOCKER_ARCHS ?= amd64
+
+BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
+PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
+TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+
+ifeq ($(GOHOSTARCH),amd64)
+    ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
+        # Only supported on amd64
+        test-flags := -race
+    endif
+endif

 # This rule is used to forward a target like "build" to "common-build". This
 # allows a new "build" target to be defined in a Makefile which includes this
 # one and override "common-build" without override warnings.
 %: common-% ;

+.PHONY: common-all
+common-all: precheck style check_license lint unused build test
+
 .PHONY: common-style
 common-style:
 @echo ">> checking code style"

@@ -67,66 +130,143 @@ common-check_license:
 exit 1; \
 fi

+.PHONY: common-deps
+common-deps:
+    @echo ">> getting dependencies"
+ifdef GO111MODULE
+    GO111MODULE=$(GO111MODULE) $(GO) mod download
+else
+    $(GO) get $(GOOPTS) -t ./...
+endif
+
 .PHONY: common-test-short
 common-test-short:
 @echo ">> running short tests"
-$(GO) test -short $(pkgs)
+GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)

 .PHONY: common-test
 common-test:
 @echo ">> running all tests"
-$(GO) test -race $(pkgs)
+GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)

 .PHONY: common-format
 common-format:
 @echo ">> formatting code"
-$(GO) fmt $(pkgs)
+GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)

 .PHONY: common-vet
 common-vet:
 @echo ">> vetting code"
-$(GO) vet $(pkgs)
+GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)

+.PHONY: common-lint
+common-lint: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+    @echo ">> running golangci-lint"
+ifdef GO111MODULE
+# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
+# Otherwise staticcheck might fail randomly for some reason not yet explained.
+    GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+    GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
+else
+    $(GOLANGCI_LINT) run $(pkgs)
+endif
+endif
+
+# For backward-compatibility.
 .PHONY: common-staticcheck
-common-staticcheck: $(STATICCHECK)
-@echo ">> running staticcheck"
-$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
+common-staticcheck: lint

 .PHONY: common-unused
 common-unused: $(GOVENDOR)
+ifdef GOVENDOR
 @echo ">> running check for unused packages"
 @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
+else
+ifdef GO111MODULE
+    @echo ">> running check for unused/missing packages in go.mod"
+    GO111MODULE=$(GO111MODULE) $(GO) mod tidy
+ifeq (,$(wildcard vendor))
+    @git diff --exit-code -- go.sum go.mod
+else
+    @echo ">> running check for unused packages in vendor/"
+    GO111MODULE=$(GO111MODULE) $(GO) mod vendor
+    @git diff --exit-code -- go.sum go.mod vendor/
+endif
+endif
+endif

 .PHONY: common-build
 common-build: promu
 @echo ">> building binaries"
-$(PROMU) build --prefix $(PREFIX)
+GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)

 .PHONY: common-tarball
 common-tarball: promu
 @echo ">> building release tarball"
 $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

-.PHONY: common-docker
-common-docker:
-docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
+.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
+common-docker: $(BUILD_DOCKER_ARCHS)
+$(BUILD_DOCKER_ARCHS): common-docker-%:
+    docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+        --build-arg ARCH="$*" \
+        --build-arg OS="linux" \
+        .

-.PHONY: common-docker-publish
-common-docker-publish:
-docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
+common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
+$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
+    docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"

-.PHONY: common-docker-tag-latest
-common-docker-tag-latest:
-docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
+.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
+common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
+$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
+    docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+
+.PHONY: common-docker-manifest
+common-docker-manifest:
+    DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
+    DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"

 .PHONY: promu
-promu:
-GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu
+promu: $(PROMU)

-.PHONY: $(STATICCHECK)
-$(STATICCHECK):
-GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+$(PROMU):
+    $(eval PROMU_TMP := $(shell mktemp -d))
+    curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
+    mkdir -p $(FIRST_GOPATH)/bin
+    cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
+    rm -r $(PROMU_TMP)
+
+.PHONY: proto
+proto:
+    @echo ">> generating code from proto files"
+    @./scripts/genproto.sh
+
+ifdef GOLANGCI_LINT
+$(GOLANGCI_LINT):
+    mkdir -p $(FIRST_GOPATH)/bin
+    curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
+endif
+
+ifdef GOVENDOR
 .PHONY: $(GOVENDOR)
 $(GOVENDOR):
 GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
+endif
+
+.PHONY: precheck
+precheck::
+
+define PRECHECK_COMMAND_template =
+precheck:: $(1)_precheck
+
+PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
+.PHONY: $(1)_precheck
+$(1)_precheck:
+    @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
+        echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
+        exit 1; \
+    fi
+endef
74 README.md

@@ -9,55 +9,43 @@ This is the [Go](http://golang.org) client library for
 instrumenting application code, and one for creating clients that talk to the
 Prometheus HTTP API.

-__This library requires Go1.7 or later.__
+__This library requires Go1.9 or later.__

 ## Important note about releases, versioning, tagging, and stability

-While our goal is to follow [Semantic Versioning](https://semver.org/), this
-repository is still pre-1.0.0. To quote the
-[Semantic Versioning spec](https://semver.org/#spec-item-4): “Anything may
-change at any time. The public API should not be considered stable.” We know
-that this is at odds with the widespread use of this library. However, just
-declaring something 1.0.0 doesn't make it 1.0.0. Instead, we are working
-towards a 1.0.0 release that actually deserves its major version number.
-
-Having said that, we aim for always keeping the tip of master in a workable
-state. We occasionally tag versions and track their changes in CHANGELOG.md,
-but this happens mostly to keep dependency management tools happy and to give
-people a handle they can talk about easily. In particular, all commits in the
-master branch have passed the same testing and reviewing. There is no QA
-process in place that would render tagged commits more stable or better tested
-than others.
+In this repository, we used to mostly ignore the many coming and going
+dependency management tools for Go and instead wait for a tool that most of the
+community would converge on. Our bet is that this tool has arrived now in the
+form of [Go
+Modules](https://github.com/golang/go/wiki/Modules#how-to-upgrade-and-downgrade-dependencies).

-There is a plan behind the current (pre-1.0.0) versioning, though:
+To make full use of what Go Modules are offering, the previous versioning
+roadmap for this repository had to be changed. In particular, Go Modules
+finally provide a way for incompatible versions of the same package to coexist
+in the same binary. For that, however, the versions must be tagged with
+different major versions of 1 or greater (following [Semantic
+Versioning](https://semver.org/)). Thus, we decided to abandon the original
+plan of introducing a lot of breaking changes _before_ releasing v1 of this
+repository, mostly driven by the widespread use this repository already has and
+the relatively stable state it is in.

-- v0.9 is the “production release”, currently tracked in the master
-  branch. “Patch” releases will usually be just bug fixes, indeed, but
-  important new features that do not require invasive code changes might also
-  be included in those. We do not plan any breaking changes from one v0.9.x
-  release to any later v0.9.y release, but nothing is guaranteed. Since the
-  master branch will eventually be switched over to track the upcoming v0.10
-  (see below), we recommend to tell your dependency management tool of choice
-  to use the latest v0.9.x release, at least for your production software. In
-  that way, you should get bug fixes and non-invasive, low-risk new features
-  without the need to change anything on your part.
-- v0.10 is a planned release that will have a _lot_ of breaking changes
-  (despite being only a “minor” release in the Semantic Versioning terminology,
-  but as said, pre-1.0.0 means nothing is guaranteed). Essentially, we have
-  been piling up feature requests that require breaking changes for a while,
-  and they are all collected in the
-  [v0.10 milestone](https://github.com/prometheus/client_golang/milestone/2).
-  Since there will be so many breaking changes, the development for v0.10 is
-  currently not happening in the master branch, but in the
-  [dev-0.10 branch](https://github.com/prometheus/client_golang/tree/dev-0.10).
-  It will violently change for a while, and it will definitely be in a
-  non-working state now and then. It should only be used for sneak-peaks and
-  discussions of the new features and designs.
-- Once v0.10 is ready for real-life use, it will be merged into the master
-  branch (which is the reason why you should lock your dependency management
-  tool to v0.9.x and only migrate to v0.10 when both you and v0.10 are ready
-  for it). In the ideal case, v0.10 will be the basis for the future v1.0
-  release, but we cannot provide an ETA at this time.
+To leverage the mechanism Go Modules offers for a transition between major
+versions, the current plan is the following:
+
+- The v0.9.x series of releases will see a small number of bugfix releases to
+  deal with a few remaining minor issues (#543, #542, #539).
+- After that, all features currently marked as _deprecated_ will be removed,
+  and the result will be released as v1.0.0.
+- The planned breaking changes previously gathered as part of the v0.10
+  milestone will now go into the v2 milestone. The v2 development happens in a
+  [separate branch](https://github.com/prometheus/client_golang/tree/dev-v2)
+  for the time being. v2 releases off that branch will happen once sufficient
+  stability is reached. v1 and v2 will coexist for a while to enable a
+  convenient transition.
+- The API client in prometheus/client_golang/api/… is still considered
+  experimental. While it will be tagged alongside the rest of the code
+  according to the plan above, we cannot strictly guarantee semver semantics
+  for it.

 ## Instrumenting applications

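For consumers who want to stay on the v0.9.x line while this plan plays out, a minimal go.mod sketch; the client_golang module path comes from the go.mod added in this merge, the application module name is hypothetical, and v0.9.3 is simply the release listed in the CHANGELOG above:

module example.com/myapp

go 1.12

require github.com/prometheus/client_golang v0.9.3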
|
|
|
@ -11,8 +11,6 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
// Package api provides clients for the HTTP APIs.
|
// Package api provides clients for the HTTP APIs.
|
||||||
package api
|
package api
|
||||||
|
|
||||||
|
@ -60,6 +58,28 @@ type Client interface {
|
||||||
Do(context.Context, *http.Request) (*http.Response, []byte, error)
|
Do(context.Context, *http.Request) (*http.Response, []byte, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DoGetFallback will attempt to do the request as-is, and on a 405 it will fallback to a GET request.
|
||||||
|
func DoGetFallback(c Client, ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, error) {
|
||||||
|
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode()))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
|
||||||
|
resp, body, err := c.Do(ctx, req)
|
||||||
|
if resp != nil && resp.StatusCode == http.StatusMethodNotAllowed {
|
||||||
|
u.RawQuery = args.Encode()
|
||||||
|
req, err = http.NewRequest(http.MethodGet, u.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
return resp, body, err
|
||||||
|
}
|
||||||
|
return c.Do(ctx, req)
|
||||||
|
}
|
||||||
|
|
||||||
// NewClient returns a new Client.
|
// NewClient returns a new Client.
|
||||||
//
|
//
|
||||||
// It is safe to use the returned Client from multiple goroutines.
|
// It is safe to use the returned Client from multiple goroutines.
|
||||||
|
@ -119,8 +139,8 @@ func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response,
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
err = resp.Body.Close()
|
|
||||||
<-done
|
<-done
|
||||||
|
err = resp.Body.Close()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = ctx.Err()
|
err = ctx.Err()
|
||||||
}
|
}
|
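A minimal usage sketch for the DoGetFallback helper added above, assuming a Prometheus server reachable at http://localhost:9090; the address and the "up" query are illustrative, while NewClient, Config, Client.URL and DoGetFallback are the api package identifiers shown in this diff:

package main

import (
    "context"
    "fmt"
    "net/url"

    "github.com/prometheus/client_golang/api"
)

func main() {
    // The address is illustrative; point it at a real Prometheus server.
    client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
    if err != nil {
        panic(err)
    }

    u := client.URL("/api/v1/query", nil)
    args := url.Values{"query": []string{"up"}}

    // DoGetFallback issues a POST first; on a 405 Method Not Allowed it
    // retries the same request as a GET with the arguments in the URL.
    _, body, err := api.DoGetFallback(client, context.Background(), u, args)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(body))
}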
api/client_test.go

@@ -11,14 +11,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build go1.7
-
 package api

 import (
+    "context"
+    "encoding/json"
     "net/http"
+    "net/http/httptest"
     "net/url"
     "testing"
+
+    "github.com/prometheus/tsdb/testutil"
 )

 func TestConfig(t *testing.T) {

@@ -113,3 +116,70 @@ func TestClientURL(t *testing.T) {
         }
     }
 }
+
+func TestDoGetFallback(t *testing.T) {
+    v := url.Values{"a": []string{"1", "2"}}
+
+    type testResponse struct {
+        Values string
+        Method string
+    }
+
+    // Start a local HTTP server.
+    server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+        req.ParseForm()
+        r := &testResponse{
+            Values: req.Form.Encode(),
+            Method: req.Method,
+        }
+
+        body, _ := json.Marshal(r)
+
+        if req.Method == http.MethodPost {
+            if req.URL.Path == "/blockPost" {
+                http.Error(w, string(body), http.StatusMethodNotAllowed)
+                return
+            }
+        }
+
+        w.Write(body)
+    }))
+    // Close the server when test finishes.
+    defer server.Close()
+
+    u, err := url.Parse(server.URL)
+    testutil.Ok(t, err)
+    client := &httpClient{client: *(server.Client())}
+
+    // Do a POST, and ensure that the POST succeeds.
+    _, b, err := DoGetFallback(client, context.TODO(), u, v)
+    if err != nil {
+        t.Fatalf("Error doing local request: %v", err)
+    }
+    resp := &testResponse{}
+    if err := json.Unmarshal(b, resp); err != nil {
+        testutil.Ok(t, err)
+    }
+    if resp.Method != http.MethodPost {
+        t.Fatalf("Mismatch method")
+    }
+    if resp.Values != v.Encode() {
+        t.Fatalf("Mismatch in values")
+    }
+
+    // Do a fallback to a GET.
+    u.Path = "/blockPost"
+    _, b, err = DoGetFallback(client, context.TODO(), u, v)
+    if err != nil {
+        t.Fatalf("Error doing local request: %v", err)
+    }
+    if err := json.Unmarshal(b, resp); err != nil {
+        testutil.Ok(t, err)
+    }
+    if resp.Method != http.MethodGet {
+        t.Fatalf("Mismatch method")
+    }
+    if resp.Values != v.Encode() {
+        t.Fatalf("Mismatch in values")
+    }
+}
api/prometheus/v1/api.go

@@ -11,8 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build go1.7
-
 // Package v1 provides bindings to the Prometheus HTTP API v1:
 // http://prometheus.io/docs/querying/api/
 package v1

@@ -20,6 +18,7 @@ package v1
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "net/http"
     "strconv"

@@ -34,12 +33,14 @@ const (

     apiPrefix = "/api/v1"

+    epAlerts = apiPrefix + "/alerts"
     epAlertManagers = apiPrefix + "/alertmanagers"
     epQuery = apiPrefix + "/query"
     epQueryRange = apiPrefix + "/query_range"
     epLabelValues = apiPrefix + "/label/:name/values"
     epSeries = apiPrefix + "/series"
     epTargets = apiPrefix + "/targets"
+    epRules = apiPrefix + "/rules"
     epSnapshot = apiPrefix + "/admin/tsdb/snapshot"
     epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series"
     epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"

@@ -47,26 +48,49 @@ const (
     epFlags = apiPrefix + "/status/flags"
 )

+// AlertState models the state of an alert.
+type AlertState string
+
 // ErrorType models the different API error types.
 type ErrorType string

 // HealthStatus models the health status of a scrape target.
 type HealthStatus string

+// RuleType models the type of a rule.
+type RuleType string
+
+// RuleHealth models the health status of a rule.
+type RuleHealth string
+
 const (
+    // Possible values for AlertState.
+    AlertStateFiring AlertState = "firing"
+    AlertStateInactive AlertState = "inactive"
+    AlertStatePending AlertState = "pending"
+
     // Possible values for ErrorType.
     ErrBadData ErrorType = "bad_data"
-    ErrTimeout = "timeout"
-    ErrCanceled = "canceled"
-    ErrExec = "execution"
-    ErrBadResponse = "bad_response"
-    ErrServer = "server_error"
-    ErrClient = "client_error"
+    ErrTimeout ErrorType = "timeout"
+    ErrCanceled ErrorType = "canceled"
+    ErrExec ErrorType = "execution"
+    ErrBadResponse ErrorType = "bad_response"
+    ErrServer ErrorType = "server_error"
+    ErrClient ErrorType = "client_error"

     // Possible values for HealthStatus.
     HealthGood HealthStatus = "up"
     HealthUnknown HealthStatus = "unknown"
     HealthBad HealthStatus = "down"
+
+    // Possible values for RuleType.
+    RuleTypeRecording RuleType = "recording"
+    RuleTypeAlerting RuleType = "alerting"
+
+    // Possible values for RuleHealth.
+    RuleHealthGood = "ok"
+    RuleHealthUnknown = "unknown"
+    RuleHealthBad = "err"
 )

 // Error is an error returned by the API.

@@ -90,6 +114,8 @@ type Range struct {

 // API provides bindings for Prometheus's v1 API.
 type API interface {
+    // Alerts returns a list of all active alerts.
+    Alerts(ctx context.Context) (AlertsResult, error)
     // AlertManagers returns an overview of the current state of the Prometheus alert manager discovery.
     AlertManagers(ctx context.Context) (AlertManagersResult, error)
     // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.

@@ -111,10 +137,17 @@ type API interface {
     // Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
     // under the TSDB's data directory and returns the directory as response.
     Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
+    // Rules returns a list of alerting and recording rules that are currently loaded.
+    Rules(ctx context.Context) (RulesResult, error)
     // Targets returns an overview of the current state of the Prometheus target discovery.
     Targets(ctx context.Context) (TargetsResult, error)
 }

+// AlertsResult contains the result from querying the alerts endpoint.
+type AlertsResult struct {
+    Alerts []Alert `json:"alerts"`
+}
+
 // AlertManagersResult contains the result from querying the alertmanagers endpoint.
 type AlertManagersResult struct {
     Active []AlertManager `json:"activeAlertManagers"`

@@ -139,6 +172,63 @@ type SnapshotResult struct {
     Name string `json:"name"`
 }

+// RulesResult contains the result from querying the rules endpoint.
+type RulesResult struct {
+    Groups []RuleGroup `json:"groups"`
+}
+
+// RuleGroup models a rule group that contains a set of recording and alerting rules.
+type RuleGroup struct {
+    Name string `json:"name"`
+    File string `json:"file"`
+    Interval float64 `json:"interval"`
+    Rules Rules `json:"rules"`
+}
+
+// Recording and alerting rules are stored in the same slice to preserve the order
+// that rules are returned in by the API.
+//
+// Rule types can be determined using a type switch:
+//   switch v := rule.(type) {
+//   case RecordingRule:
+//     fmt.Print("got a recording rule")
+//   case AlertingRule:
+//     fmt.Print("got an alerting rule")
+//   default:
+//     fmt.Printf("unknown rule type %s", v)
+//   }
+type Rules []interface{}
+
+// AlertingRule models an alerting rule.
+type AlertingRule struct {
+    Name string `json:"name"`
+    Query string `json:"query"`
+    Duration float64 `json:"duration"`
+    Labels model.LabelSet `json:"labels"`
+    Annotations model.LabelSet `json:"annotations"`
+    Alerts []*Alert `json:"alerts"`
+    Health RuleHealth `json:"health"`
+    LastError string `json:"lastError,omitempty"`
+}
+
+// RecordingRule models a recording rule.
+type RecordingRule struct {
+    Name string `json:"name"`
+    Query string `json:"query"`
+    Labels model.LabelSet `json:"labels,omitempty"`
+    Health RuleHealth `json:"health"`
+    LastError string `json:"lastError,omitempty"`
+}
+
+// Alert models an active alert.
+type Alert struct {
+    ActiveAt time.Time `json:"activeAt"`
+    Annotations model.LabelSet
+    Labels model.LabelSet
+    State AlertState
+    Value float64
+}
+
 // TargetsResult contains the result from querying the targets endpoint.
 type TargetsResult struct {
     Active []ActiveTarget `json:"activeTargets"`

@@ -147,17 +237,17 @@ type TargetsResult struct {

 // ActiveTarget models an active Prometheus scrape target.
 type ActiveTarget struct {
-    DiscoveredLabels model.LabelSet `json:"discoveredLabels"`
+    DiscoveredLabels map[string]string `json:"discoveredLabels"`
     Labels model.LabelSet `json:"labels"`
     ScrapeURL string `json:"scrapeUrl"`
     LastError string `json:"lastError"`
     LastScrape time.Time `json:"lastScrape"`
     Health HealthStatus `json:"health"`
 }

 // DroppedTarget models a dropped Prometheus scrape target.
 type DroppedTarget struct {
-    DiscoveredLabels model.LabelSet `json:"discoveredLabels"`
+    DiscoveredLabels map[string]string `json:"discoveredLabels"`
 }

 // queryResult contains result data for a query.

@@ -169,6 +259,111 @@ type queryResult struct {
     v model.Value
 }

+func (rg *RuleGroup) UnmarshalJSON(b []byte) error {
+    v := struct {
+        Name string `json:"name"`
+        File string `json:"file"`
+        Interval float64 `json:"interval"`
+        Rules []json.RawMessage `json:"rules"`
+    }{}
+
+    if err := json.Unmarshal(b, &v); err != nil {
+        return err
+    }
+
+    rg.Name = v.Name
+    rg.File = v.File
+    rg.Interval = v.Interval
+
+    for _, rule := range v.Rules {
+        alertingRule := AlertingRule{}
+        if err := json.Unmarshal(rule, &alertingRule); err == nil {
+            rg.Rules = append(rg.Rules, alertingRule)
+            continue
+        }
+        recordingRule := RecordingRule{}
+        if err := json.Unmarshal(rule, &recordingRule); err == nil {
+            rg.Rules = append(rg.Rules, recordingRule)
+            continue
+        }
+        return errors.New("failed to decode JSON into an alerting or recording rule")
+    }
+
+    return nil
+}
+
+func (r *AlertingRule) UnmarshalJSON(b []byte) error {
+    v := struct {
+        Type string `json:"type"`
+    }{}
+    if err := json.Unmarshal(b, &v); err != nil {
+        return err
+    }
+    if v.Type == "" {
+        return errors.New("type field not present in rule")
+    }
+    if v.Type != string(RuleTypeAlerting) {
+        return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeAlerting), v.Type)
+    }
+
+    rule := struct {
+        Name string `json:"name"`
+        Query string `json:"query"`
+        Duration float64 `json:"duration"`
+        Labels model.LabelSet `json:"labels"`
+        Annotations model.LabelSet `json:"annotations"`
+        Alerts []*Alert `json:"alerts"`
+        Health RuleHealth `json:"health"`
+        LastError string `json:"lastError,omitempty"`
+    }{}
+    if err := json.Unmarshal(b, &rule); err != nil {
+        return err
+    }
+    r.Health = rule.Health
+    r.Annotations = rule.Annotations
+    r.Name = rule.Name
+    r.Query = rule.Query
+    r.Alerts = rule.Alerts
+    r.Duration = rule.Duration
+    r.Labels = rule.Labels
+    r.LastError = rule.LastError
+
+    return nil
+}
+
+func (r *RecordingRule) UnmarshalJSON(b []byte) error {
+    v := struct {
+        Type string `json:"type"`
+    }{}
+    if err := json.Unmarshal(b, &v); err != nil {
+        return err
+    }
+    if v.Type == "" {
+        return errors.New("type field not present in rule")
+    }
+    if v.Type != string(RuleTypeRecording) {
+        return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeRecording), v.Type)
+    }
+
+    rule := struct {
+        Name string `json:"name"`
+        Query string `json:"query"`
+        Labels model.LabelSet `json:"labels,omitempty"`
+        Health RuleHealth `json:"health"`
+        LastError string `json:"lastError,omitempty"`
+    }{}
+    if err := json.Unmarshal(b, &rule); err != nil {
+        return err
+    }
+    r.Health = rule.Health
+    r.Labels = rule.Labels
+    r.Name = rule.Name
+    r.LastError = rule.LastError
+    r.Query = rule.Query
+
+    return nil
+}
+
 func (qr *queryResult) UnmarshalJSON(b []byte) error {
     v := struct {
         Type model.ValueType `json:"resultType"`

@@ -213,6 +408,24 @@ type httpAPI struct {
     client api.Client
 }

+func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) {
+    u := h.client.URL(epAlerts, nil)
+
+    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+    if err != nil {
+        return AlertsResult{}, err
+    }
+
+    _, body, err := h.client.Do(ctx, req)
+    if err != nil {
+        return AlertsResult{}, err
+    }
+
+    var res AlertsResult
+    err = json.Unmarshal(body, &res)
+    return res, err
+}
+
 func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) {
     u := h.client.URL(epAlertManagers, nil)

@@ -325,14 +538,7 @@ func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
         q.Set("time", ts.Format(time.RFC3339Nano))
     }

-    u.RawQuery = q.Encode()
-
-    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
-    if err != nil {
-        return nil, err
-    }
-
-    _, body, err := h.client.Do(ctx, req)
+    _, body, err := api.DoGetFallback(h.client, ctx, u, q)
     if err != nil {
         return nil, err
     }

@@ -358,14 +564,7 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
     q.Set("end", end)
     q.Set("step", step)

-    u.RawQuery = q.Encode()
-
-    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
-    if err != nil {
-        return nil, err
-    }
-
-    _, body, err := h.client.Do(ctx, req)
+    _, body, err := api.DoGetFallback(h.client, ctx, u, q)
     if err != nil {
         return nil, err
     }

@@ -427,6 +626,24 @@ func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) {
     return res, err
 }

+func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) {
+    u := h.client.URL(epRules, nil)
+
+    req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+    if err != nil {
+        return RulesResult{}, err
+    }
+
+    _, body, err := h.client.Do(ctx, req)
+    if err != nil {
+        return RulesResult{}, err
+    }
+
+    var res RulesResult
+    err = json.Unmarshal(body, &res)
+    return res, err
+}
+
 func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
     u := h.client.URL(epTargets, nil)

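A minimal sketch of consuming the new Alerts and Rules bindings, using the type switch documented on the Rules type above; the server address is illustrative, while NewClient, NewAPI, Alerts, Rules, AlertingRule and RecordingRule are the identifiers added in this diff:

package main

import (
    "context"
    "fmt"

    "github.com/prometheus/client_golang/api"
    v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
    // The address is illustrative; point it at a real Prometheus server.
    client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
    if err != nil {
        panic(err)
    }
    promAPI := v1.NewAPI(client)

    // List active alerts via the new /api/v1/alerts binding.
    alerts, err := promAPI.Alerts(context.Background())
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d active alerts\n", len(alerts.Alerts))

    // List loaded rule groups via the new /api/v1/rules binding.
    rules, err := promAPI.Rules(context.Background())
    if err != nil {
        panic(err)
    }
    for _, g := range rules.Groups {
        for _, r := range g.Rules {
            // Rules is a []interface{}; distinguish entries with a type switch.
            switch rule := r.(type) {
            case v1.AlertingRule:
                fmt.Printf("alerting rule %s: %s\n", rule.Name, rule.Query)
            case v1.RecordingRule:
                fmt.Printf("recording rule %s: %s\n", rule.Name, rule.Query)
            default:
                fmt.Printf("unknown rule type %T\n", rule)
            }
        }
    }
}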
api/prometheus/v1/api_test.go

@@ -11,8 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build go1.7
-
 package v1

 import (

@@ -158,6 +156,12 @@ func TestAPIs(t *testing.T) {
         }
     }

+    doRules := func() func() (interface{}, error) {
+        return func() (interface{}, error) {
+            return promAPI.Rules(context.Background())
+        }
+    }
+
     doTargets := func() func() (interface{}, error) {
         return func() (interface{}, error) {
             return promAPI.Targets(context.Background())

@@ -175,7 +179,7 @@ func TestAPIs(t *testing.T) {
                 },
             },

-            reqMethod: "GET",
+            reqMethod: "POST",
             reqPath: "/api/v1/query",
             reqParam: url.Values{
                 "query": []string{"2"},

@@ -190,7 +194,7 @@ func TestAPIs(t *testing.T) {
             do: doQuery("2", testTime),
             inErr: fmt.Errorf("some error"),

-            reqMethod: "GET",
+            reqMethod: "POST",
             reqPath: "/api/v1/query",
             reqParam: url.Values{
                 "query": []string{"2"},

@@ -208,7 +212,7 @@ func TestAPIs(t *testing.T) {
                 Detail: "some body",
             },

-            reqMethod: "GET",
+            reqMethod: "POST",
             reqPath: "/api/v1/query",
             reqParam: url.Values{
                 "query": []string{"2"},

@@ -226,7 +230,7 @@ func TestAPIs(t *testing.T) {
                 Detail: "some body",
             },

-            reqMethod: "GET",
+            reqMethod: "POST",
             reqPath: "/api/v1/query",
             reqParam: url.Values{
                 "query": []string{"2"},

@@ -243,7 +247,7 @@ func TestAPIs(t *testing.T) {
             }),
             inErr: fmt.Errorf("some error"),

-            reqMethod: "GET",
+            reqMethod: "POST",
             reqPath: "/api/v1/query_range",
             reqParam: url.Values{
                 "query": []string{"2"},

@@ -460,6 +464,108 @@ func TestAPIs(t *testing.T) {
             err: fmt.Errorf("some error"),
         },

+        {
+            do: doRules(),
+            reqMethod: "GET",
+            reqPath: "/api/v1/rules",
+            inRes: map[string]interface{}{
+                "groups": []map[string]interface{}{
+                    {
+                        "file": "/rules.yaml",
+                        "interval": 60,
+                        "name": "example",
+                        "rules": []map[string]interface{}{
+                            {
+                                "alerts": []map[string]interface{}{
+                                    {
+                                        "activeAt": testTime.UTC().Format(time.RFC3339Nano),
+                                        "annotations": map[string]interface{}{
+                                            "summary": "High request latency",
+                                        },
+                                        "labels": map[string]interface{}{
+                                            "alertname": "HighRequestLatency",
+                                            "severity": "page",
+                                        },
+                                        "state": "firing",
+                                        "value": 1,
+                                    },
+                                },
+                                "annotations": map[string]interface{}{
+                                    "summary": "High request latency",
+                                },
+                                "duration": 600,
+                                "health": "ok",
+                                "labels": map[string]interface{}{
+                                    "severity": "page",
+                                },
+                                "name": "HighRequestLatency",
+                                "query": "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5",
+                                "type": "alerting",
+                            },
+                            {
+                                "health": "ok",
+                                "name": "job:http_inprogress_requests:sum",
+                                "query": "sum(http_inprogress_requests) by (job)",
+                                "type": "recording",
+                            },
+                        },
+                    },
+                },
+            },
+            res: RulesResult{
+                Groups: []RuleGroup{
+                    {
+                        Name: "example",
+                        File: "/rules.yaml",
+                        Interval: 60,
+                        Rules: []interface{}{
+                            AlertingRule{
+                                Alerts: []*Alert{
+                                    {
+                                        ActiveAt: testTime.UTC(),
+                                        Annotations: model.LabelSet{
+                                            "summary": "High request latency",
+                                        },
+                                        Labels: model.LabelSet{
+                                            "alertname": "HighRequestLatency",
+                                            "severity": "page",
+                                        },
+                                        State: AlertStateFiring,
+                                        Value: 1,
+                                    },
+                                },
+                                Annotations: model.LabelSet{
+                                    "summary": "High request latency",
+                                },
+                                Labels: model.LabelSet{
+                                    "severity": "page",
+                                },
+                                Duration: 600,
+                                Health: RuleHealthGood,
+                                Name: "HighRequestLatency",
+                                Query: "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5",
+                                LastError: "",
+                            },
+                            RecordingRule{
+                                Health: RuleHealthGood,
+                                Name: "job:http_inprogress_requests:sum",
+                                Query: "sum(http_inprogress_requests) by (job)",
+                                LastError: "",
+                            },
+                        },
+                    },
+                },
+            },
+        },
+
+        {
+            do: doRules(),
+            reqMethod: "GET",
+            reqPath: "/api/v1/rules",
+            inErr: fmt.Errorf("some error"),
+            err: fmt.Errorf("some error"),
+        },
+
         {
             do: doTargets(),
             reqMethod: "GET",

@@ -497,7 +603,7 @@ func TestAPIs(t *testing.T) {
             res: TargetsResult{
                 Active: []ActiveTarget{
                     {
-                        DiscoveredLabels: model.LabelSet{
+                        DiscoveredLabels: map[string]string{
                             "__address__": "127.0.0.1:9090",
                             "__metrics_path__": "/metrics",
                             "__scheme__": "http",

@@ -515,7 +621,7 @@ func TestAPIs(t *testing.T) {
                 },
                 Dropped: []DroppedTarget{
                     {
-                        DiscoveredLabels: model.LabelSet{
+                        DiscoveredLabels: map[string]string{
                             "__address__": "127.0.0.1:9100",
                             "__metrics_path__": "/metrics",
                             "__scheme__": "http",
11 go.mod

@@ -0,0 +1,11 @@
+module github.com/prometheus/client_golang
+
+require (
+    github.com/beorn7/perks v1.0.0
+    github.com/go-logfmt/logfmt v0.4.0 // indirect
+    github.com/golang/protobuf v1.3.1
+    github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90
+    github.com/prometheus/common v0.4.0
+    github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084
+    github.com/prometheus/tsdb v0.7.1
+)
62 go.sum

@@ -0,0 +1,62 @@
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
@ -79,7 +79,7 @@ type Collector interface {
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collecter (cf. the Register
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
@ -172,7 +172,7 @@ func TestCounterAddLarge(t *testing.T) {
}).(*counter)

// large overflows the underlying type and should therefore be stored in valBits.
large := float64(math.MaxUint64 + 1)
large := math.Nextafter(float64(math.MaxUint64), 1e20)
counter.Add(large)
if expected, got := large, math.Float64frombits(counter.valBits); expected != got {
t.Errorf("valBits expected %f, got %f.", expected, got)
@ -93,7 +93,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// First add only the const label names and sort them...
for labelName := range constLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name", labelName)
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, labelName)
@ -115,7 +115,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name", labelName)
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, "$"+labelName)
@ -122,13 +122,13 @@
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
// NewDesc comes in handy to create those Desc instances. Alternatively, you
// could return no Desc at all, which will marke the Collector “unchecked”. No
// could return no Desc at all, which will mark the Collector “unchecked”. No
// checks are porformed at registration time, but metric consistency will still
// checks are performed at registration time, but metric consistency will still
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situatios where the exact
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
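As an aside to the doc-comment change above: a minimal sketch of what an unchecked Collector can look like, assuming only the public client_golang API. The type and metric names here are invented for illustration and are not part of this commit.

package main

import "github.com/prometheus/client_golang/prometheus"

// dynamicCollector is a hypothetical Collector whose metrics are only known at
// scrape time, so Describe sends no Desc at all and the Collector is
// registered as "unchecked".
type dynamicCollector struct{}

// Describe intentionally sends nothing; consistency is then only checked at
// scrape time.
func (dynamicCollector) Describe(chan<- *prometheus.Desc) {}

// Collect creates throw-away metrics on the fly.
func (dynamicCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("example_dynamic_value", "A value only known at scrape time.", nil, nil),
		prometheus.GaugeValue, 42,
	)
}

func main() {
	prometheus.MustRegister(dynamicCollector{})
}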
@ -13,7 +13,13 @@

package prometheus_test

import "github.com/prometheus/client_golang/prometheus"
import (
"log"
"net/http"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)

// ClusterManager is an example for a system that might have been built without
// Prometheus in mind. It models a central manager of jobs running in a
@ -124,4 +130,13 @@ func ExampleCollector() {
// variables to then do something with them.
NewClusterManager("db", reg)
NewClusterManager("ca", reg)

// Add the standard process and Go metrics to the custom registry.
reg.MustRegister(
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
prometheus.NewGoCollector(),
)

http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
log.Fatal(http.ListenAndServe(":8080", nil))
}
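A possible way to exercise such a handler in a test, sketched under the assumption that the same *prometheus.Registry named reg is in scope as above; httptest, ioutil, strings, fmt, and log come from the standard library and are not touched by this commit.

// Serve the registry via promhttp and check that the Go collector's metrics
// show up in the scrape output.
srv := httptest.NewServer(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
defer srv.Close()

resp, err := http.Get(srv.URL)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()

body, err := ioutil.ReadAll(resp.Body)
if err != nil {
	log.Fatal(err)
}
// go_goroutines is exposed by the registered Go collector.
fmt.Println(strings.Contains(string(body), "go_goroutines"))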
@ -214,16 +214,11 @@ func ExampleRegister() {

// A different (and somewhat tricky) approach is to use
// ConstLabels. ConstLabels are pairs of label names and label values
// that never change. You might ask what those labels are good for (and
// that never change. Each worker creates and registers an own Counter
// rightfully so - if they never change, they could as well be part of
// instance where the only difference is in the value of the
// the metric name). There are essentially two use-cases: The first is
// ConstLabels. Those Counters can all be registered because the
// if labels are constant throughout the lifetime of a binary execution,
// different ConstLabel values guarantee that each worker will increment
// but they vary over time or between different instances of a running
// a different Counter metric.
// binary. The second is what we have here: Each worker creates and
// registers an own Counter instance where the only difference is in the
// value of the ConstLabels. Those Counters can all be registered
// because the different ConstLabel values guarantee that each worker
// will increment a different Counter metric.
counterOpts := prometheus.CounterOpts{
Subsystem: "worker_pool",
Name: "completed_tasks",
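Condensing the rewritten comment into code: a hedged sketch of the per-worker ConstLabels pattern, with the worker id hard-coded purely for illustration.

counterOpts := prometheus.CounterOpts{
	Subsystem:   "worker_pool",
	Name:        "completed_tasks",
	Help:        "Total number of tasks completed.",
	ConstLabels: prometheus.Labels{"worker_id": "42"}, // Differs per worker.
}
taskCounter := prometheus.NewCounter(counterOpts)
// Registration succeeds for every worker because the ConstLabels values
// differ, so each worker increments its own completed_tasks series.
prometheus.MustRegister(taskCounter)
taskCounter.Inc()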
@ -14,9 +14,9 @@
package prometheus

import (
"fmt"
"runtime"
"runtime/debug"
"sync"
"time"
)
@ -26,16 +26,41 @@ type goCollector struct {
gcDesc *Desc
goInfoDesc *Desc

// metrics to describe and collect
// ms... are memstats related.
metrics memStatsMetrics
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}

// NewGoCollector returns a collector which exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This causes a stop-the-world, which is very short with Go1.9+
// is called. This requires to “stop the world”, which usually only happens for
// (~25µs). However, with older Go versions, the stop-the-world duration depends
// garbage collection (GC). Take the following implications into account when
// on the heap size and can be quite significant (~1.7 ms/GiB as per
// deciding whether to use the Go collector:
//
// 1. The performance impact of stopping the world is the more relevant the more
// frequently metrics are collected. However, with Go1.9 or later the
// stop-the-world time per metrics collection is very short (~25µs) so that the
// performance impact will only matter in rare cases. However, with older Go
// versions, the stop-the-world duration depends on the heap size and can be
// quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
//
// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
// metrics collection happens to coincide with GC, it will only complete after
// GC has finished. Usually, GC is fast enough to not cause problems. However,
// with a very large heap, GC might take multiple seconds, which is enough to
// cause scrape timeouts in common setups. To avoid this problem, the Go
// collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
// issue.)
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(
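The fallback behaviour described in the new doc comment amounts to “read with a deadline, otherwise reuse a recent cached value”. A generic, self-contained sketch of that idea, deliberately using none of the goCollector internals:

package main

import (
	"sync"
	"time"
)

type cachedReader struct {
	mtx      sync.Mutex
	last     string
	lastTime time.Time
	read     func() string // Potentially slow, e.g. blocked behind a long GC.
}

// get waits at most maxWait for a fresh read. On timeout it falls back to the
// cached value if that is younger than maxAge, and otherwise blocks until the
// fresh read has completed after all.
func (c *cachedReader) get(maxWait, maxAge time.Duration) string {
	done := make(chan struct{})
	var fresh string
	go func() {
		fresh = c.read()
		c.mtx.Lock()
		c.last, c.lastTime = fresh, time.Now()
		c.mtx.Unlock()
		close(done)
	}()

	timer := time.NewTimer(maxWait)
	select {
	case <-done:
		timer.Stop() // Avoid piling up timers under frequent calls.
		return fresh
	case <-timer.C:
	}
	c.mtx.Lock()
	if time.Since(c.lastTime) < maxAge {
		defer c.mtx.Unlock()
		return c.last
	}
	c.mtx.Unlock()
	<-done // Cache too old or empty: wait for the fresh read.
	return fresh
}

func main() {
	c := &cachedReader{read: func() string { time.Sleep(2 * time.Second); return "fresh" }}
	println(c.get(time.Second, 5*time.Minute)) // No cache yet, so this blocks for the fresh read.
}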
@ -54,7 +79,11 @@ func NewGoCollector() Collector {
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
metrics: memStatsMetrics{
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
@ -253,7 +282,7 @@ func NewGoCollector() Collector {
}

func memstatNamespace(s string) string {
return fmt.Sprintf("go_memstats_%s", s)
return "go_memstats_" + s
}

// Describe returns all descriptions of the collector.
@ -262,13 +291,27 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.goInfoDesc
for _, i := range c.metrics {
for _, i := range c.msMetrics {
ch <- i.desc
}
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()

ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@ -286,9 +329,31 @@ func (c *goCollector) Collect(ch chan<- Metric) {

ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)

ms := &runtime.MemStats{}
timer := time.NewTimer(c.msMaxWait)
runtime.ReadMemStats(ms)
select {
for _, i := range c.metrics {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}

func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}
@ -21,28 +21,40 @@ import (
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGoCollector(t *testing.T) {
|
func TestGoCollectorGoroutines(t *testing.T) {
|
||||||
var (
|
var (
|
||||||
c = NewGoCollector()
|
c = NewGoCollector()
|
||||||
ch = make(chan Metric)
|
metricCh = make(chan Metric)
|
||||||
waitc = make(chan struct{})
|
waitCh = make(chan struct{})
|
||||||
closec = make(chan struct{})
|
endGoroutineCh = make(chan struct{})
|
||||||
old = -1
|
endCollectionCh = make(chan struct{})
|
||||||
|
old = -1
|
||||||
)
|
)
|
||||||
defer close(closec)
|
defer func() {
|
||||||
|
close(endGoroutineCh)
|
||||||
|
// Drain the collect channel to prevent goroutine leak.
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-metricCh:
|
||||||
|
case <-endCollectionCh:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
c.Collect(ch)
|
c.Collect(metricCh)
|
||||||
go func(c <-chan struct{}) {
|
go func(c <-chan struct{}) {
|
||||||
<-c
|
<-c
|
||||||
}(closec)
|
}(endGoroutineCh)
|
||||||
<-waitc
|
<-waitCh
|
||||||
c.Collect(ch)
|
c.Collect(metricCh)
|
||||||
|
close(endCollectionCh)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case m := <-ch:
|
case m := <-metricCh:
|
||||||
// m can be Gauge or Counter,
|
// m can be Gauge or Counter,
|
||||||
// currently just test the go_goroutines Gauge
|
// currently just test the go_goroutines Gauge
|
||||||
// and ignore others.
|
// and ignore others.
|
||||||
|
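The “drain the collect channel” deferred loop added in the test above is a reusable pattern; a stand-alone, hedged sketch with generic channel types, nothing from this package:

// drainUntil discards values from ch until done is closed, so that a producer
// blocked on sending to an unbuffered channel can always terminate.
func drainUntil(ch <-chan int, done <-chan struct{}) {
	for {
		select {
		case <-ch:
		case <-done:
			return
		}
	}
}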
@ -57,7 +69,7 @@ func TestGoCollector(t *testing.T) {
|
||||||
|
|
||||||
if old == -1 {
|
if old == -1 {
|
||||||
old = int(pb.GetGauge().GetValue())
|
old = int(pb.GetGauge().GetValue())
|
||||||
close(waitc)
|
close(waitCh)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,43 +77,47 @@ func TestGoCollector(t *testing.T) {
|
||||||
// TODO: This is flaky in highly concurrent situations.
|
// TODO: This is flaky in highly concurrent situations.
|
||||||
t.Errorf("want 1 new goroutine, got %d", diff)
|
t.Errorf("want 1 new goroutine, got %d", diff)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GoCollector performs three sends per call.
|
|
||||||
// On line 27 we need to receive three more sends
|
|
||||||
// to shut down cleanly.
|
|
||||||
<-ch
|
|
||||||
<-ch
|
|
||||||
<-ch
|
|
||||||
return
|
|
||||||
case <-time.After(1 * time.Second):
|
case <-time.After(1 * time.Second):
|
||||||
t.Fatalf("expected collect timed out")
|
t.Fatalf("expected collect timed out")
|
||||||
}
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGCCollector(t *testing.T) {
|
func TestGoCollectorGC(t *testing.T) {
|
||||||
var (
|
var (
|
||||||
c = NewGoCollector()
|
c = NewGoCollector()
|
||||||
ch = make(chan Metric)
|
metricCh = make(chan Metric)
|
||||||
waitc = make(chan struct{})
|
waitCh = make(chan struct{})
|
||||||
closec = make(chan struct{})
|
endCollectionCh = make(chan struct{})
|
||||||
oldGC uint64
|
oldGC uint64
|
||||||
oldPause float64
|
oldPause float64
|
||||||
)
|
)
|
||||||
defer close(closec)
|
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
c.Collect(ch)
|
c.Collect(metricCh)
|
||||||
// force GC
|
// force GC
|
||||||
runtime.GC()
|
runtime.GC()
|
||||||
<-waitc
|
<-waitCh
|
||||||
c.Collect(ch)
|
c.Collect(metricCh)
|
||||||
|
close(endCollectionCh)
|
||||||
|
}()
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
// Drain the collect channel to prevent goroutine leak.
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-metricCh:
|
||||||
|
case <-endCollectionCh:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
first := true
|
first := true
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case metric := <-ch:
|
case metric := <-metricCh:
|
||||||
pb := &dto.Metric{}
|
pb := &dto.Metric{}
|
||||||
metric.Write(pb)
|
metric.Write(pb)
|
||||||
if pb.GetSummary() == nil {
|
if pb.GetSummary() == nil {
|
||||||
|
@ -119,7 +135,7 @@ func TestGCCollector(t *testing.T) {
|
||||||
first = false
|
first = false
|
||||||
oldGC = *pb.GetSummary().SampleCount
|
oldGC = *pb.GetSummary().SampleCount
|
||||||
oldPause = *pb.GetSummary().SampleSum
|
oldPause = *pb.GetSummary().SampleSum
|
||||||
close(waitc)
|
close(waitCh)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
|
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
|
||||||
|
@ -128,9 +144,102 @@ func TestGCCollector(t *testing.T) {
|
||||||
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
|
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
|
||||||
t.Errorf("want moar pause, got %f", diff)
|
t.Errorf("want moar pause, got %f", diff)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
case <-time.After(1 * time.Second):
|
case <-time.After(1 * time.Second):
|
||||||
t.Fatalf("expected collect timed out")
|
t.Fatalf("expected collect timed out")
|
||||||
}
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGoCollectorMemStats(t *testing.T) {
|
||||||
|
var (
|
||||||
|
c = NewGoCollector().(*goCollector)
|
||||||
|
got uint64
|
||||||
|
)
|
||||||
|
|
||||||
|
checkCollect := func(want uint64) {
|
||||||
|
metricCh := make(chan Metric)
|
||||||
|
endCh := make(chan struct{})
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
c.Collect(metricCh)
|
||||||
|
close(endCh)
|
||||||
|
}()
|
||||||
|
Collect:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case metric := <-metricCh:
|
||||||
|
if metric.Desc().fqName != "go_memstats_alloc_bytes" {
|
||||||
|
continue Collect
|
||||||
|
}
|
||||||
|
pb := &dto.Metric{}
|
||||||
|
metric.Write(pb)
|
||||||
|
got = uint64(pb.GetGauge().GetValue())
|
||||||
|
case <-endCh:
|
||||||
|
break Collect
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if want != got {
|
||||||
|
t.Errorf("unexpected value of go_memstats_alloc_bytes, want %d, got %d", want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Speed up the timing to make the tast faster.
|
||||||
|
c.msMaxWait = time.Millisecond
|
||||||
|
c.msMaxAge = 10 * time.Millisecond
|
||||||
|
|
||||||
|
// Scenario 1: msRead responds slowly, no previous memstats available,
|
||||||
|
// msRead is executed anyway.
|
||||||
|
c.msRead = func(ms *runtime.MemStats) {
|
||||||
|
time.Sleep(3 * time.Millisecond)
|
||||||
|
ms.Alloc = 1
|
||||||
|
}
|
||||||
|
checkCollect(1)
|
||||||
|
// Now msLast is set.
|
||||||
|
c.msMtx.Lock()
|
||||||
|
if want, got := uint64(1), c.msLast.Alloc; want != got {
|
||||||
|
t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got)
|
||||||
|
}
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
|
||||||
|
// Scenario 2: msRead responds fast, previous memstats available, new
|
||||||
|
// value collected.
|
||||||
|
c.msRead = func(ms *runtime.MemStats) {
|
||||||
|
ms.Alloc = 2
|
||||||
|
}
|
||||||
|
checkCollect(2)
|
||||||
|
// msLast is set, too.
|
||||||
|
c.msMtx.Lock()
|
||||||
|
if want, got := uint64(2), c.msLast.Alloc; want != got {
|
||||||
|
t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got)
|
||||||
|
}
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
|
||||||
|
// Scenario 3: msRead responds slowly, previous memstats available, old
|
||||||
|
// value collected.
|
||||||
|
c.msRead = func(ms *runtime.MemStats) {
|
||||||
|
time.Sleep(3 * time.Millisecond)
|
||||||
|
ms.Alloc = 3
|
||||||
|
}
|
||||||
|
checkCollect(2)
|
||||||
|
// After waiting, new value is still set in msLast.
|
||||||
|
time.Sleep(12 * time.Millisecond)
|
||||||
|
c.msMtx.Lock()
|
||||||
|
if want, got := uint64(3), c.msLast.Alloc; want != got {
|
||||||
|
t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got)
|
||||||
|
}
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
|
||||||
|
// Scenario 4: msRead responds slowly, previous memstats is too old, new
|
||||||
|
// value collected.
|
||||||
|
c.msRead = func(ms *runtime.MemStats) {
|
||||||
|
time.Sleep(3 * time.Millisecond)
|
||||||
|
ms.Alloc = 4
|
||||||
|
}
|
||||||
|
checkCollect(4)
|
||||||
|
c.msMtx.Lock()
|
||||||
|
if want, got := uint64(4), c.msLast.Alloc; want != got {
|
||||||
|
t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got)
|
||||||
|
}
|
||||||
|
c.msMtx.Unlock()
|
||||||
|
}
|
||||||
|
|
|
@ -17,6 +17,7 @@ package graphite

import (
"bufio"
"context"
"errors"
"fmt"
"io"
@ -26,7 +27,6 @@ import (

"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"golang.org/x/net/context"

dto "github.com/prometheus/client_model/go"
@ -16,6 +16,7 @@ package graphite
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"log"
@ -26,7 +27,6 @@ import (
"time"

"github.com/prometheus/common/model"
"golang.org/x/net/context"

"github.com/prometheus/client_golang/prometheus"
)
@ -204,8 +204,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
}
// Finally we know the final length of h.upperBounds and can make counts
// Finally we know the final length of h.upperBounds and can make buckets
// for both states:
// for both counts:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
@ -224,18 +224,21 @@ type histogramCounts struct {
}

type histogram struct {
// countAndHotIdx is a complicated one. For lock-free yet atomic
// countAndHotIdx enables lock-free writes with use of atomic updates.
// observations, we need to save the total count of observations again,
// The most significant bit is the hot index [0 or 1] of the count field
// combined with the index of the currently-hot counts struct, so that
// below. Observe calls update the hot one. All remaining bits count the
// we can perform the operation on both values atomically. The least
// number of Observe calls. Observe starts by incrementing this counter,
// significant bit defines the hot counts struct. The remaining 63 bits
// and finishes by incrementing the count field in the respective
// represent the total count of observations. This happens under the
// histogramCounts, as a marker for completion.
// assumption that the 63bit count will never overflow. Rationale: An
// observations takes about 30ns. Let's assume it could happen in
// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
// which is about 3000 years.
//
// This has to be first in the struct for 64bit alignment. See
// Calls of the Write method (which are non-mutating reads from the
// perspective of the histogram) swap the hot–cold under the writeMtx
// lock. A cooldown is awaited (while locked) by comparing the number of
// observations with the initiation count. Once they match, then the
// last observation on the now cool one has completed. All cool fields must
// be merged into the new hot before releasing writeMtx.
//
// Fields with atomic access first! See alignment constraint:
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
|
||||||
desc *Desc
|
desc *Desc
|
||||||
writeMtx sync.Mutex // Only used in the Write method.
|
writeMtx sync.Mutex // Only used in the Write method.
|
||||||
|
|
||||||
upperBounds []float64
|
|
||||||
|
|
||||||
// Two counts, one is "hot" for lock-free observations, the other is
|
// Two counts, one is "hot" for lock-free observations, the other is
|
||||||
// "cold" for writing out a dto.Metric. It has to be an array of
|
// "cold" for writing out a dto.Metric. It has to be an array of
|
||||||
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
||||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
||||||
counts [2]*histogramCounts
|
counts [2]*histogramCounts
|
||||||
hotIdx int // Index of currently-hot counts. Only used within Write.
|
|
||||||
|
|
||||||
labelPairs []*dto.LabelPair
|
upperBounds []float64
|
||||||
|
labelPairs []*dto.LabelPair
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *histogram) Desc() *Desc {
|
func (h *histogram) Desc() *Desc {
|
||||||
|
@ -271,11 +272,11 @@ func (h *histogram) Observe(v float64) {
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)

// We increment h.countAndHotIdx by 2 so that the counter in the upper
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented by 1. At the same time, we get the new value
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 2)
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n%2]
hotCounts := h.counts[n>>63]

if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
@ -293,72 +294,43 @@ func (h *histogram) Observe(v float64) {
}

func (h *histogram) Write(out *dto.Metric) error {
var (
// For simplicity, we protect this whole method by a mutex. It is not in
his = &dto.Histogram{}
// the hot path, i.e. Observe is called much more often than Write. The
buckets = make([]*dto.Bucket, len(h.upperBounds))
// complication of making Write lock-free isn't worth it, if possible at
hotCounts, coldCounts *histogramCounts
// all.
count uint64
)

// For simplicity, we mutex the rest of this method. It is not in the
// hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it.
h.writeMtx.Lock()
defer h.writeMtx.Unlock()

// This is a bit arcane, which is why the following spells out this if
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
// clause in English:
// without touching the count bits. See the struct comments for a full
//
// description of the algorithm.
// If the currently-hot counts struct is #0, we atomically increment
n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
// h.countAndHotIdx by 1 so that from now on Observe will use the counts
// count is contained unchanged in the lower 63 bits.
// struct #1. Furthermore, the atomic increment gives us the new value,
count := n & ((1 << 63) - 1)
// which, in its most significant 63 bits, tells us the count of
// The most significant bit tells us which counts is hot. The complement
// observations done so far up to and including currently ongoing
// is thus the cold one.
// observations still using the counts struct just changed from hot to
hotCounts := h.counts[n>>63]
// cold. To have a normal uint64 for the count, we bitshift by 1 and
coldCounts := h.counts[(^n)>>63]
// save the result in count. We also set h.hotIdx to 1 for the next
// Write call, and we will refer to counts #1 as hotCounts and to counts
// #0 as coldCounts.
//
// If the currently-hot counts struct is #1, we do the corresponding
// things the other way round. We have to _decrement_ h.countAndHotIdx
// (which is a bit arcane in itself, as we have to express -1 with an
// unsigned int...).
if h.hotIdx == 0 {
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
h.hotIdx = 1
hotCounts = h.counts[1]
coldCounts = h.counts[0]
} else {
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
h.hotIdx = 0
hotCounts = h.counts[0]
coldCounts = h.counts[1]
}

// Now we have to wait for the now-declared-cold counts to actually cool
// Await cooldown.
// down, i.e. wait for all observations still using it to finish. That's
for count != atomic.LoadUint64(&coldCounts.count) {
// the case once the count in the cold counts struct is the same as the
// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
for {
if count == atomic.LoadUint64(&coldCounts.count) {
break
}
runtime.Gosched() // Let observations get work done.
}

his.SampleCount = proto.Uint64(count)
his := &dto.Histogram{
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
}
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
buckets[i] = &dto.Bucket{
his.Bucket[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
}

his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
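The cooldown loop in the rewritten Write, reduced to its essentials: a hedged sketch with a plain counter pointer instead of the real histogramCounts (imports runtime and sync/atomic assumed).

// waitCooldown spins until the cold shard has recorded as many completed
// observations as had been started at the moment of the hot/cold swap.
func waitCooldown(started uint64, coldCount *uint64) {
	for started != atomic.LoadUint64(coldCount) {
		runtime.Gosched() // Let in-flight observations finish.
	}
}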
@ -38,7 +38,6 @@ type delegator interface {
type responseWriterDelegator struct {
http.ResponseWriter

handler, method string
status int
written int64
wroteHeader bool
@ -75,8 +74,11 @@ type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator }

func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
//remove support from client_golang yet.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
@ -93,6 +95,9 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
d.written += n
return n, err
}
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}

var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
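The pickDelegator table above has 32 entries because the wrapper is chosen by a bitmask over the five optional interfaces a ResponseWriter may implement. A hedged, self-contained sketch of that dispatch idea with invented constant names (the real promhttp constants are defined outside this hunk):

package main

import (
	"io"
	"net/http"
)

const (
	hasCloseNotifier = 1 << iota
	hasFlusher
	hasHijacker
	hasReaderFrom
	hasPusher
)

// capabilityID returns a value in [0, 31] encoding which optional interfaces w
// supports; it can index a precomputed table of 32 wrapper constructors.
// http.CloseNotifier is deprecated; it is kept here only to mirror the table above.
func capabilityID(w http.ResponseWriter) int {
	id := 0
	if _, ok := w.(http.CloseNotifier); ok {
		id += hasCloseNotifier
	}
	if _, ok := w.(http.Flusher); ok {
		id += hasFlusher
	}
	if _, ok := w.(http.Hijacker); ok {
		id += hasHijacker
	}
	if _, ok := w.(io.ReaderFrom); ok {
		id += hasReaderFrom
	}
	if _, ok := w.(http.Pusher); ok {
		id += hasPusher
	}
	return id
}

func main() {
	var w http.ResponseWriter // A real handler would receive this from net/http.
	_ = capabilityID(w)
}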
@ -196,4 +201,157 @@ func init() {
|
||||||
http.CloseNotifier
|
http.CloseNotifier
|
||||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
}
|
}
|
||||||
|
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
||||||
|
return pusherDelegator{d}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||||
|
}
|
||||||
|
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
||||||
|
return struct {
|
||||||
|
*responseWriterDelegator
|
||||||
|
http.Pusher
|
||||||
|
io.ReaderFrom
|
||||||
|
http.Hijacker
|
||||||
|
http.Flusher
|
||||||
|
http.CloseNotifier
|
||||||
|
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
||||||
|
d := &responseWriterDelegator{
|
||||||
|
ResponseWriter: w,
|
||||||
|
observeWriteHeader: observeWriteHeaderFunc,
|
||||||
|
}
|
||||||
|
|
||||||
|
id := 0
|
||||||
|
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
|
||||||
|
//remove support from client_golang yet.
|
||||||
|
if _, ok := w.(http.CloseNotifier); ok {
|
||||||
|
id += closeNotifier
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Flusher); ok {
|
||||||
|
id += flusher
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Hijacker); ok {
|
||||||
|
id += hijacker
|
||||||
|
}
|
||||||
|
if _, ok := w.(io.ReaderFrom); ok {
|
||||||
|
id += readerFrom
|
||||||
|
}
|
||||||
|
if _, ok := w.(http.Pusher); ok {
|
||||||
|
id += pusher
|
||||||
|
}
|
||||||
|
|
||||||
|
return pickDelegator[id](d)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,181 +0,0 @@
|
||||||
// Copyright 2017 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
package promhttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
type pusherDelegator struct{ *responseWriterDelegator }
|
|
||||||
|
|
||||||
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
|
||||||
return d.ResponseWriter.(http.Pusher).Push(target, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
|
||||||
return pusherDelegator{d}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
|
||||||
}
|
|
||||||
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
|
||||||
return struct {
|
|
||||||
*responseWriterDelegator
|
|
||||||
http.Pusher
|
|
||||||
io.ReaderFrom
|
|
||||||
http.Hijacker
|
|
||||||
http.Flusher
|
|
||||||
http.CloseNotifier
|
|
||||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
|
||||||
d := &responseWriterDelegator{
|
|
||||||
ResponseWriter: w,
|
|
||||||
observeWriteHeader: observeWriteHeaderFunc,
|
|
||||||
}
|
|
||||||
|
|
||||||
id := 0
|
|
||||||
if _, ok := w.(http.CloseNotifier); ok {
|
|
||||||
id += closeNotifier
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Flusher); ok {
|
|
||||||
id += flusher
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Hijacker); ok {
|
|
||||||
id += hijacker
|
|
||||||
}
|
|
||||||
if _, ok := w.(io.ReaderFrom); ok {
|
|
||||||
id += readerFrom
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Pusher); ok {
|
|
||||||
id += pusher
|
|
||||||
}
|
|
||||||
|
|
||||||
return pickDelegator[id](d)
|
|
||||||
}
|
|
|
@ -1,44 +0,0 @@
|
||||||
// Copyright 2017 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// +build !go1.8
|
|
||||||
|
|
||||||
package promhttp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
|
|
||||||
d := &responseWriterDelegator{
|
|
||||||
ResponseWriter: w,
|
|
||||||
observeWriteHeader: observeWriteHeaderFunc,
|
|
||||||
}
|
|
||||||
|
|
||||||
id := 0
|
|
||||||
if _, ok := w.(http.CloseNotifier); ok {
|
|
||||||
id += closeNotifier
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Flusher); ok {
|
|
||||||
id += flusher
|
|
||||||
}
|
|
||||||
if _, ok := w.(http.Hijacker); ok {
|
|
||||||
id += hijacker
|
|
||||||
}
|
|
||||||
if _, ok := w.(io.ReaderFrom); ok {
|
|
||||||
id += readerFrom
|
|
||||||
}
|
|
||||||
|
|
||||||
return pickDelegator[id](d)
|
|
||||||
}
|
|
|
@ -47,7 +47,6 @@ import (

const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
@@ -14,7 +14,9 @@
 package promhttp

 import (
+	"crypto/tls"
 	"net/http"
+	"net/http/httptrace"
 	"time"

 	"github.com/prometheus/client_golang/prometheus"
@@ -95,3 +97,123 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		return resp, err
 	})
 }
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately buckets Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+
+		trace := &httptrace.ClientTrace{
+			GotConn: func(_ httptrace.GotConnInfo) {
+				if it.GotConn != nil {
+					it.GotConn(time.Since(start).Seconds())
+				}
+			},
+			PutIdleConn: func(err error) {
+				if err != nil {
+					return
+				}
+				if it.PutIdleConn != nil {
+					it.PutIdleConn(time.Since(start).Seconds())
+				}
+			},
+			DNSStart: func(_ httptrace.DNSStartInfo) {
+				if it.DNSStart != nil {
+					it.DNSStart(time.Since(start).Seconds())
+				}
+			},
+			DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+				}
+			},
+			ConnectStart: func(_, _ string) {
+				if it.ConnectStart != nil {
+					it.ConnectStart(time.Since(start).Seconds())
+				}
+			},
+			ConnectDone: func(_, _ string, err error) {
+				if err != nil {
+					return
+				}
+				if it.ConnectDone != nil {
+					it.ConnectDone(time.Since(start).Seconds())
+				}
+			},
+			GotFirstResponseByte: func() {
+				if it.GotFirstResponseByte != nil {
+					it.GotFirstResponseByte(time.Since(start).Seconds())
+				}
+			},
+			Got100Continue: func() {
+				if it.Got100Continue != nil {
+					it.Got100Continue(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeStart: func() {
+				if it.TLSHandshakeStart != nil {
+					it.TLSHandshakeStart(time.Since(start).Seconds())
+				}
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+				if err != nil {
+					return
+				}
+				if it.TLSHandshakeDone != nil {
+					it.TLSHandshakeDone(time.Since(start).Seconds())
+				}
+			},
+			WroteHeaders: func() {
+				if it.WroteHeaders != nil {
+					it.WroteHeaders(time.Since(start).Seconds())
+				}
+			},
+			Wait100Continue: func() {
+				if it.Wait100Continue != nil {
+					it.Wait100Continue(time.Since(start).Seconds())
+				}
+			},
+			WroteRequest: func(_ httptrace.WroteRequestInfo) {
+				if it.WroteRequest != nil {
+					it.WroteRequest(time.Since(start).Seconds())
+				}
+			},
+		}
+		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
+
+		return next.RoundTrip(r)
+	})
+}
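Reviewer note (not part of the diff): a minimal usage sketch of the relocated InstrumentRoundTripperTrace, wiring two of the hooks into a HistogramVec. The metric name, bucket values, and target URL are illustrative placeholders only.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // Histogram partitioned by trace event; DNS latencies are tiny, hence small buckets.
    dnsLatency := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "dns_duration_seconds",
            Help:    "Trace DNS latency.",
            Buckets: []float64{.005, .01, .025, .05},
        },
        []string{"event"},
    )
    prometheus.MustRegister(dnsLatency)

    // Only the hooks that are set get called; the rest are ignored.
    trace := &promhttp.InstrumentTrace{
        DNSStart: func(t float64) { dnsLatency.WithLabelValues("dns_start").Observe(t) },
        DNSDone:  func(t float64) { dnsLatency.WithLabelValues("dns_done").Observe(t) },
    }

    client := &http.Client{
        Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
    }
    resp, err := client.Get("http://example.org")
    if err != nil {
        log.Fatal(err)
    }
    resp.Body.Close()
}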
@@ -1,144 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package promhttp
-
-import (
-	"context"
-	"crypto/tls"
-	"net/http"
-	"net/http/httptrace"
-	"time"
-)
-
-// InstrumentTrace is used to offer flexibility in instrumenting the available
-// httptrace.ClientTrace hook functions. Each function is passed a float64
-// representing the time in seconds since the start of the http request. A user
-// may choose to use separately buckets Histograms, or implement custom
-// instance labels on a per function basis.
-type InstrumentTrace struct {
-	GotConn              func(float64)
-	PutIdleConn          func(float64)
-	GotFirstResponseByte func(float64)
-	Got100Continue       func(float64)
-	DNSStart             func(float64)
-	DNSDone              func(float64)
-	ConnectStart         func(float64)
-	ConnectDone          func(float64)
-	TLSHandshakeStart    func(float64)
-	TLSHandshakeDone     func(float64)
-	WroteHeaders         func(float64)
-	Wait100Continue      func(float64)
-	WroteRequest         func(float64)
-}
-
-// InstrumentRoundTripperTrace is a middleware that wraps the provided
-// RoundTripper and reports times to hook functions provided in the
-// InstrumentTrace struct. Hook functions that are not present in the provided
-// InstrumentTrace struct are ignored. Times reported to the hook functions are
-// time since the start of the request. Only with Go1.9+, those times are
-// guaranteed to never be negative. (Earlier Go versions are not using a
-// monotonic clock.) Note that partitioning of Histograms is expensive and
-// should be used judiciously.
-//
-// For hook functions that receive an error as an argument, no observations are
-// made in the event of a non-nil error value.
-//
-// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
-		start := time.Now()
-
-		trace := &httptrace.ClientTrace{
-			GotConn: func(_ httptrace.GotConnInfo) {
-				if it.GotConn != nil {
-					it.GotConn(time.Since(start).Seconds())
-				}
-			},
-			PutIdleConn: func(err error) {
-				if err != nil {
-					return
-				}
-				if it.PutIdleConn != nil {
-					it.PutIdleConn(time.Since(start).Seconds())
-				}
-			},
-			DNSStart: func(_ httptrace.DNSStartInfo) {
-				if it.DNSStart != nil {
-					it.DNSStart(time.Since(start).Seconds())
-				}
-			},
-			DNSDone: func(_ httptrace.DNSDoneInfo) {
-				if it.DNSDone != nil {
-					it.DNSDone(time.Since(start).Seconds())
-				}
-			},
-			ConnectStart: func(_, _ string) {
-				if it.ConnectStart != nil {
-					it.ConnectStart(time.Since(start).Seconds())
-				}
-			},
-			ConnectDone: func(_, _ string, err error) {
-				if err != nil {
-					return
-				}
-				if it.ConnectDone != nil {
-					it.ConnectDone(time.Since(start).Seconds())
-				}
-			},
-			GotFirstResponseByte: func() {
-				if it.GotFirstResponseByte != nil {
-					it.GotFirstResponseByte(time.Since(start).Seconds())
-				}
-			},
-			Got100Continue: func() {
-				if it.Got100Continue != nil {
-					it.Got100Continue(time.Since(start).Seconds())
-				}
-			},
-			TLSHandshakeStart: func() {
-				if it.TLSHandshakeStart != nil {
-					it.TLSHandshakeStart(time.Since(start).Seconds())
-				}
-			},
-			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
-				if err != nil {
-					return
-				}
-				if it.TLSHandshakeDone != nil {
-					it.TLSHandshakeDone(time.Since(start).Seconds())
-				}
-			},
-			WroteHeaders: func() {
-				if it.WroteHeaders != nil {
-					it.WroteHeaders(time.Since(start).Seconds())
-				}
-			},
-			Wait100Continue: func() {
-				if it.Wait100Continue != nil {
-					it.Wait100Continue(time.Since(start).Seconds())
-				}
-			},
-			WroteRequest: func(_ httptrace.WroteRequestInfo) {
-				if it.WroteRequest != nil {
-					it.WroteRequest(time.Since(start).Seconds())
-				}
-			},
-		}
-		r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
-
-		return next.RoundTrip(r)
-	})
-}
@@ -11,20 +11,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build go1.8
-
 package promhttp

 import (
+	"context"
+	"fmt"
 	"log"
 	"net/http"
+	"net/http/httptest"
 	"testing"
 	"time"

 	"github.com/prometheus/client_golang/prometheus"
 )

-func TestClientMiddlewareAPI(t *testing.T) {
+func makeInstrumentedClient() (*http.Client, *prometheus.Registry) {
 	client := http.DefaultClient
 	client.Timeout = 1 * time.Second

@@ -74,16 +75,16 @@ func TestClientMiddlewareAPI(t *testing.T) {

 	trace := &InstrumentTrace{
 		DNSStart: func(t float64) {
-			dnsLatencyVec.WithLabelValues("dns_start")
+			dnsLatencyVec.WithLabelValues("dns_start").Observe(t)
 		},
 		DNSDone: func(t float64) {
-			dnsLatencyVec.WithLabelValues("dns_done")
+			dnsLatencyVec.WithLabelValues("dns_done").Observe(t)
 		},
 		TLSHandshakeStart: func(t float64) {
-			tlsLatencyVec.WithLabelValues("tls_handshake_start")
+			tlsLatencyVec.WithLabelValues("tls_handshake_start").Observe(t)
 		},
 		TLSHandshakeDone: func(t float64) {
-			tlsLatencyVec.WithLabelValues("tls_handshake_done")
+			tlsLatencyVec.WithLabelValues("tls_handshake_done").Observe(t)
 		},
 	}

@@ -94,12 +95,100 @@ func TestClientMiddlewareAPI(t *testing.T) {
 			),
 		),
 	)
+	return client, reg
+}

-	resp, err := client.Get("http://google.com")
+func TestClientMiddlewareAPI(t *testing.T) {
+	client, reg := makeInstrumentedClient()
+	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer backend.Close()
+
+	resp, err := client.Get(backend.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, got := 3, len(mfs); want != got {
+		t.Fatalf("unexpected number of metric families gathered, want %d, got %d", want, got)
+	}
+	for _, mf := range mfs {
+		if len(mf.Metric) == 0 {
+			t.Errorf("metric family %s must not be empty", mf.GetName())
+		}
+	}
+}
+
+func TestClientMiddlewareAPIWithRequestContext(t *testing.T) {
+	client, reg := makeInstrumentedClient()
+	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer backend.Close()
+
+	req, err := http.NewRequest("GET", backend.URL, nil)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
+
+	// Set a context with a long timeout.
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+	req = req.WithContext(ctx)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatal(err)
+	}
 	defer resp.Body.Close()
+
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want, got := 3, len(mfs); want != got {
+		t.Fatalf("unexpected number of metric families gathered, want %d, got %d", want, got)
+	}
+	for _, mf := range mfs {
+		if len(mf.Metric) == 0 {
+			t.Errorf("metric family %s must not be empty", mf.GetName())
+		}
+	}
+}
+
+func TestClientMiddlewareAPIWithRequestContextTimeout(t *testing.T) {
+	client, _ := makeInstrumentedClient()
+
+	// Slow testserver responding in 100ms.
+	backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		time.Sleep(100 * time.Millisecond)
+		w.WriteHeader(http.StatusOK)
+	}))
+	defer backend.Close()
+
+	req, err := http.NewRequest("GET", backend.URL, nil)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	// Set a context with a short timeout.
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+	defer cancel()
+	req = req.WithContext(ctx)
+
+	_, err = client.Do(req)
+	if err == nil {
+		t.Fatal("did not get timeout error")
+	}
+	if want, got := fmt.Sprintf("Get %s: context deadline exceeded", backend.URL), err.Error(); want != got {
+		t.Fatalf("want error %q, got %q", want, got)
+	}
 }

 func ExampleInstrumentRoundTripperDuration() {
@@ -162,16 +251,16 @@ func ExampleInstrumentRoundTripperDuration() {
 	// functions that we want to instrument.
 	trace := &InstrumentTrace{
 		DNSStart: func(t float64) {
-			dnsLatencyVec.WithLabelValues("dns_start")
+			dnsLatencyVec.WithLabelValues("dns_start").Observe(t)
 		},
 		DNSDone: func(t float64) {
-			dnsLatencyVec.WithLabelValues("dns_done")
+			dnsLatencyVec.WithLabelValues("dns_done").Observe(t)
 		},
 		TLSHandshakeStart: func(t float64) {
-			tlsLatencyVec.WithLabelValues("tls_handshake_start")
+			tlsLatencyVec.WithLabelValues("tls_handshake_start").Observe(t)
 		},
 		TLSHandshakeDone: func(t float64) {
-			tlsLatencyVec.WithLabelValues("tls_handshake_done")
+			tlsLatencyVec.WithLabelValues("tls_handshake_done").Observe(t)
 		},
 	}
@@ -294,6 +294,8 @@ func (t *testFlusher) Flush() { t.flushCalled = true }
 func TestInterfaceUpgrade(t *testing.T) {
 	w := &testResponseWriter{}
 	d := newDelegator(w, nil)
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
 	d.(http.CloseNotifier).CloseNotify()
 	if !w.closeNotifyCalled {
 		t.Error("CloseNotify not called")
@@ -312,6 +314,8 @@ func TestInterfaceUpgrade(t *testing.T) {

 	f := &testFlusher{}
 	d = newDelegator(f, nil)
+	//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
+	//remove support from client_golang yet.
 	if _, ok := d.(http.CloseNotifier); ok {
 		t.Error("delegator unexpectedly implements http.CloseNotifier")
 	}
@@ -50,6 +50,11 @@ import (

 const contentTypeHeader = "Content-Type"

+// HTTPDoer is an interface for the one method of http.Client that is used by Pusher
+type HTTPDoer interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
 // Pusher manages a push to the Pushgateway. Use New to create one, configure it
 // with its methods, and finally use the Add or Push method to push.
 type Pusher struct {
@@ -61,9 +66,11 @@ type Pusher struct {
 	gatherers  prometheus.Gatherers
 	registerer prometheus.Registerer

-	client             *http.Client
+	client             HTTPDoer
 	useBasicAuth       bool
 	username, password string
+
+	expfmt expfmt.Format
 }

 // New creates a new Pusher to push to the provided URL with the provided job
@@ -96,6 +103,7 @@ func New(url, job string) *Pusher {
 		gatherers:  prometheus.Gatherers{reg},
 		registerer: reg,
 		client:     &http.Client{},
+		expfmt:     expfmt.FmtProtoDelim,
 	}
 }

@@ -167,7 +175,11 @@ func (p *Pusher) Grouping(name, value string) *Pusher {

 // Client sets a custom HTTP client for the Pusher. For convenience, this method
 // returns a pointer to the Pusher itself.
-func (p *Pusher) Client(c *http.Client) *Pusher {
+// Pusher only needs one method of the custom HTTP client: Do(*http.Request).
+// Thus, rather than requiring a fully fledged http.Client,
+// the provided client only needs to implement the HTTPDoer interface.
+// Since *http.Client naturally implements that interface, it can still be used normally.
+func (p *Pusher) Client(c HTTPDoer) *Pusher {
 	p.client = c
 	return p
 }
@@ -182,6 +194,16 @@ func (p *Pusher) BasicAuth(username, password string) *Pusher {
 	return p
 }

+// Format configures the Pusher to use an encoding format given by the
+// provided expfmt.Format. The default format is expfmt.FmtProtoDelim and
+// should be used with the standard Prometheus Pushgateway. Custom
+// implementations may require different formats. For convenience, this
+// method returns a pointer to the Pusher itself.
+func (p *Pusher) Format(format expfmt.Format) *Pusher {
+	p.expfmt = format
+	return p
+}
+
 func (p *Pusher) push(method string) error {
 	if p.error != nil {
 		return p.error
@@ -197,7 +219,7 @@ func (p *Pusher) push(method string) error {
 		return err
 	}
 	buf := &bytes.Buffer{}
-	enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
+	enc := expfmt.NewEncoder(buf, p.expfmt)
 	// Check for pre-existing grouping labels:
 	for _, mf := range mfs {
 		for _, m := range mf.GetMetric() {
@@ -222,7 +244,7 @@ func (p *Pusher) push(method string) error {
 	if p.useBasicAuth {
 		req.SetBasicAuth(p.username, p.password)
 	}
-	req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
+	req.Header.Set(contentTypeHeader, string(p.expfmt))
 	resp, err := p.client.Do(req)
 	if err != nil {
 		return err
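Reviewer note (not part of the diff): a sketch of how the new HTTPDoer and Format options might be used together. The Pushgateway URL, job name, metric name, and the loggingDoer wrapper are illustrative assumptions, not anything this change prescribes.

package main

import (
    "fmt"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/push"
    "github.com/prometheus/common/expfmt"
)

// loggingDoer satisfies the HTTPDoer interface by wrapping a plain *http.Client
// and logging every outgoing request. A test could substitute a pure fake here.
type loggingDoer struct {
    c *http.Client
}

func (d loggingDoer) Do(req *http.Request) (*http.Response, error) {
    fmt.Println(req.Method, req.URL)
    return d.c.Do(req)
}

func main() {
    completions := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "batch_job_completions_total",
        Help: "Completed batch jobs.",
    })
    completions.Inc()

    err := push.New("http://localhost:9091", "example_job").
        Collector(completions).
        Client(loggingDoer{c: &http.Client{}}).
        Format(expfmt.FmtText). // Text format instead of the default delimited protobuf.
        Push()
    if err != nil {
        fmt.Println("push failed:", err)
    }
}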
@@ -96,7 +96,7 @@ func TestPush(t *testing.T) {
 	if lastMethod != "PUT" {
 		t.Error("want method PUT for Push, got", lastMethod)
 	}
-	if bytes.Compare(lastBody, wantBody) != 0 {
+	if !bytes.Equal(lastBody, wantBody) {
 		t.Errorf("got body %v, want %v", lastBody, wantBody)
 	}
 	if lastPath != "/metrics/job/testjob" {
@@ -113,7 +113,7 @@ func TestPush(t *testing.T) {
 	if lastMethod != "POST" {
 		t.Error("want method POST for Add, got", lastMethod)
 	}
-	if bytes.Compare(lastBody, wantBody) != 0 {
+	if !bytes.Equal(lastBody, wantBody) {
 		t.Errorf("got body %v, want %v", lastBody, wantBody)
 	}
 	if lastPath != "/metrics/job/testjob" {
@@ -170,7 +170,7 @@ func TestPush(t *testing.T) {
 	if lastMethod != "PUT" {
 		t.Error("want method PUT for Push, got", lastMethod)
 	}
-	if bytes.Compare(lastBody, wantBody) != 0 {
+	if !bytes.Equal(lastBody, wantBody) {
 		t.Errorf("got body %v, want %v", lastBody, wantBody)
 	}

@@ -185,7 +185,7 @@ func TestPush(t *testing.T) {
 	if lastMethod != "POST" {
 		t.Error("want method POST for Add, got", lastMethod)
 	}
-	if bytes.Compare(lastBody, wantBody) != 0 {
+	if !bytes.Equal(lastBody, wantBody) {
 		t.Errorf("got body %v, want %v", lastBody, wantBody)
 	}
 	if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
@@ -680,7 +680,7 @@ func processMetric(
 // Gatherers is a slice of Gatherer instances that implements the Gatherer
 // interface itself. Its Gather method calls Gather on all Gatherers in the
 // slice in order and returns the merged results. Errors returned from the
-// Gather calles are all returned in a flattened MultiError. Duplicate and
+// Gather calls are all returned in a flattened MultiError. Duplicate and
 // inconsistent Metrics are skipped (first occurrence in slice order wins) and
 // reported in the returned error.
 //
@@ -872,7 +872,13 @@ func checkMetricConsistency(
 	h = hashAddByte(h, separatorByte)
 	// Make sure label pairs are sorted. We depend on it for the consistency
 	// check.
-	sort.Sort(labelPairSorter(dtoMetric.Label))
+	if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
+		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+		copy(copiedLabels, dtoMetric.Label)
+		sort.Sort(labelPairSorter(copiedLabels))
+		dtoMetric.Label = copiedLabels
+	}
 	for _, lp := range dtoMetric.Label {
 		h = hashAdd(h, lp.GetName())
 		h = hashAddByte(h, separatorByte)
@@ -903,8 +909,8 @@ func checkDescConsistency(
 	}

 	// Is the desc consistent with the content of the metric?
-	lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
-	lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+	copy(lpsFromDesc, desc.constLabelPairs)
 	for _, l := range desc.variableLabels {
 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
 			Name: proto.String(l),
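Reviewer note (not part of the diff): the checkMetricConsistency change above only sorts a copy of the label slice when the original is out of order, leaving the caller's slice untouched. A standalone sketch of that copy-then-sort idiom follows; the labelPair type and sortedView helper are stand-ins invented for illustration, not types from this package.

package main

import (
    "fmt"
    "sort"
)

type labelPair struct{ name, value string }

type byName []labelPair

func (s byName) Len() int           { return len(s) }
func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].name < s[j].name }

// sortedView returns a sorted view of labels without mutating the input slice.
func sortedView(labels []labelPair) []labelPair {
    if sort.IsSorted(byName(labels)) {
        return labels // Already sorted; no copy needed.
    }
    copied := make([]labelPair, len(labels))
    copy(copied, labels)
    sort.Sort(byName(copied))
    return copied
}

func main() {
    in := []labelPair{{"method", "get"}, {"code", "200"}}
    fmt.Println(sortedView(in)) // Sorted copy.
    fmt.Println(in)             // Caller's slice keeps its original order.
}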
@@ -21,6 +21,7 @@ package prometheus_test

 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"math/rand"
 	"net/http"
@@ -784,6 +785,11 @@ func TestAlreadyRegistered(t *testing.T) {
 // same HistogramVec is registered concurrently and the Gather method of the
 // registry is called concurrently.
 func TestHistogramVecRegisterGatherConcurrency(t *testing.T) {
+	labelNames := make([]string, 16) // Need at least 13 to expose #512.
+	for i := range labelNames {
+		labelNames[i] = fmt.Sprint("label_", i)
+	}
+
 	var (
 		reg = prometheus.NewPedanticRegistry()
 		hv  = prometheus.NewHistogramVec(
@@ -792,7 +798,7 @@ func TestHistogramVecRegisterGatherConcurrency(t *testing.T) {
 				Help:        "This helps testing.",
 				ConstLabels: prometheus.Labels{"foo": "bar"},
 			},
-			[]string{"one", "two", "three"},
+			labelNames,
 		)
 		labelValues = []string{"a", "b", "c", "alpha", "beta", "gamma", "aleph", "beth", "gimel"}
 		quit        = make(chan struct{})
@@ -807,11 +813,11 @@ func TestHistogramVecRegisterGatherConcurrency(t *testing.T) {
 				return
 			default:
 				obs := rand.NormFloat64()*.1 + .2
-				hv.WithLabelValues(
-					labelValues[rand.Intn(len(labelValues))],
-					labelValues[rand.Intn(len(labelValues))],
-					labelValues[rand.Intn(len(labelValues))],
-				).Observe(obs)
+				values := make([]string, 0, len(labelNames))
+				for range labelNames {
+					values = append(values, labelValues[rand.Intn(len(labelValues))])
+				}
+				hv.WithLabelValues(values...).Observe(obs)
 			}
 		}
 	}
@@ -849,7 +855,7 @@ func TestHistogramVecRegisterGatherConcurrency(t *testing.T) {
 	if len(g) != 1 {
 		t.Error("Gathered unexpected number of metric families:", len(g))
 	}
-	if len(g[0].Metric[0].Label) != 4 {
+	if len(g[0].Metric[0].Label) != len(labelNames)+1 {
 		t.Error("Gathered unexpected number of label pairs:", len(g[0].Metric[0].Label))
 	}
 }
@@ -16,8 +16,10 @@ package prometheus
 import (
 	"fmt"
 	"math"
+	"runtime"
 	"sort"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/beorn7/perks/quantile"
@@ -138,7 +140,7 @@ type SummaryOpts struct {
 	BufCap uint32
 }

-// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// Problem with the sliding-window decay algorithm... The Merge method of
 // perk/quantile is actually not working as advertised - and it might be
 // unfixable, as the underlying algorithm is apparently not capable of merging
 // summaries in the first place. To avoid using Merge, we are currently adding
@@ -201,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 		opts.BufCap = DefBufCap
 	}

+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: makeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+		}
+		s.init(s) // Init self-collection.
+		return s
+	}
+
 	s := &summary{
 		desc: desc,

@@ -369,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) {
 	}
 }

+type summaryCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot–cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	return nil
+}
+
 type quantSort []*dto.Quantile

 func (s quantSort) Len() int {
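Reviewer note (not part of the diff): the countAndHotIdx field described above packs two things into one atomically-updated word: the top bit selects the "hot" slot and the lower 63 bits count observations. A tiny standalone demonstration of that encoding, under the same bit layout as documented in the struct comment:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    var countAndHotIdx uint64

    // An observation increments the counter and reads the hot index in one step.
    n := atomic.AddUint64(&countAndHotIdx, 1)
    fmt.Println("hot slot:", n>>63, "observations:", n&((1<<63)-1)) // hot slot: 0 observations: 1

    // Write flips the hot slot by adding 1<<63 without touching the count bits.
    n = atomic.AddUint64(&countAndHotIdx, 1<<63)
    fmt.Println("hot slot:", n>>63, "observations:", n&((1<<63)-1)) // hot slot: 1 observations: 1
}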
@@ -54,11 +54,19 @@ func TestSummaryWithoutObjectives(t *testing.T) {
 	if err := reg.Register(summaryWithEmptyObjectives); err != nil {
 		t.Error(err)
 	}
+	summaryWithEmptyObjectives.Observe(3)
+	summaryWithEmptyObjectives.Observe(0.14)

 	m := &dto.Metric{}
 	if err := summaryWithEmptyObjectives.Write(m); err != nil {
 		t.Error(err)
 	}
+	if got, want := m.GetSummary().GetSampleSum(), 3.14; got != want {
+		t.Errorf("got sample sum %f, want %f", got, want)
+	}
+	if got, want := m.GetSummary().GetSampleCount(), uint64(2); got != want {
+		t.Errorf("got sample sum %d, want %d", got, want)
+	}
 	if len(m.GetSummary().Quantile) != 0 {
 		t.Error("expected no objectives in summary")
 	}
@@ -37,7 +37,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"reflect"

 	"github.com/prometheus/common/expfmt"

@@ -125,47 +124,51 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames .
 // exposition format. If any metricNames are provided, only metrics with those
 // names are compared.
 func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
-	metrics, err := g.Gather()
+	got, err := g.Gather()
 	if err != nil {
 		return fmt.Errorf("gathering metrics failed: %s", err)
 	}
 	if metricNames != nil {
-		metrics = filterMetrics(metrics, metricNames)
+		got = filterMetrics(got, metricNames)
 	}
 	var tp expfmt.TextParser
-	expectedMetrics, err := tp.TextToMetricFamilies(expected)
+	wantRaw, err := tp.TextToMetricFamilies(expected)
 	if err != nil {
 		return fmt.Errorf("parsing expected metrics failed: %s", err)
 	}
+	want := internal.NormalizeMetricFamilies(wantRaw)

-	if !reflect.DeepEqual(metrics, internal.NormalizeMetricFamilies(expectedMetrics)) {
-		// Encode the gathered output to the readable text format for comparison.
-		var buf1 bytes.Buffer
-		enc := expfmt.NewEncoder(&buf1, expfmt.FmtText)
-		for _, mf := range metrics {
-			if err := enc.Encode(mf); err != nil {
-				return fmt.Errorf("encoding result failed: %s", err)
-			}
-		}
-		// Encode normalized expected metrics again to generate them in the same ordering
-		// the registry does to spot differences more easily.
-		var buf2 bytes.Buffer
-		enc = expfmt.NewEncoder(&buf2, expfmt.FmtText)
-		for _, mf := range internal.NormalizeMetricFamilies(expectedMetrics) {
-			if err := enc.Encode(mf); err != nil {
-				return fmt.Errorf("encoding result failed: %s", err)
-			}
-		}
-
+	return compare(got, want)
+}
+
+// compare encodes both provided slices of metric families into the text format,
+// compares their string message, and returns an error if they do not match.
+// The error contains the encoded text of both the desired and the actual
+// result.
+func compare(got, want []*dto.MetricFamily) error {
+	var gotBuf, wantBuf bytes.Buffer
+	enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText)
+	for _, mf := range got {
+		if err := enc.Encode(mf); err != nil {
+			return fmt.Errorf("encoding gathered metrics failed: %s", err)
+		}
+	}
+	enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText)
+	for _, mf := range want {
+		if err := enc.Encode(mf); err != nil {
+			return fmt.Errorf("encoding expected metrics failed: %s", err)
+		}
+	}
+
+	if wantBuf.String() != gotBuf.String() {
 		return fmt.Errorf(`
 metric output does not match expectation; want:

 %s

 got:

-%s
-`, buf2.String(), buf1.String())
+%s`, wantBuf.String(), gotBuf.String())
 	}
 	return nil
 }
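Reviewer note (not part of the diff): typical use of the comparison helpers from a test, with the reworked compare error carrying both the expected and the gathered text on mismatch. The metric name and expected exposition text below are illustrative only.

package main

import (
    "fmt"
    "strings"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
    c := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "jobs_done_total",
        Help: "Finished jobs.",
    })
    c.Inc()

    expected := `
		# HELP jobs_done_total Finished jobs.
		# TYPE jobs_done_total counter
		jobs_done_total 1
	`
    // On mismatch the returned error now contains both the expected and the
    // gathered text encoding, which makes the failure easy to read.
    if err := testutil.CollectAndCompare(c, strings.NewReader(expected), "jobs_done_total"); err != nil {
        fmt.Println(err)
    }
}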
@@ -143,6 +143,103 @@ func TestCollectAndCompare(t *testing.T) {
 	}
 }

+func TestCollectAndCompareNoLabel(t *testing.T) {
+	const metadata = `
+		# HELP some_total A value that represents a counter.
+		# TYPE some_total counter
+	`
+
+	c := prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "some_total",
+		Help: "A value that represents a counter.",
+	})
+	c.Inc()
+
+	expected := `
+
+		some_total 1
+	`
+
+	if err := CollectAndCompare(c, strings.NewReader(metadata+expected), "some_total"); err != nil {
+		t.Errorf("unexpected collecting result:\n%s", err)
+	}
+}
+
+func TestCollectAndCompareHistogram(t *testing.T) {
+	inputs := []struct {
+		name        string
+		c           prometheus.Collector
+		metadata    string
+		expect      string
+		observation float64
+	}{
+		{
+			name: "Testing Histogram Collector",
+			c: prometheus.NewHistogram(prometheus.HistogramOpts{
+				Name:    "some_histogram",
+				Help:    "An example of a histogram",
+				Buckets: []float64{1, 2, 3},
+			}),
+			metadata: `
+				# HELP some_histogram An example of a histogram
+				# TYPE some_histogram histogram
+			`,
+			expect: `
+				some_histogram{le="1"} 0
+				some_histogram{le="2"} 0
+				some_histogram{le="3"} 1
+				some_histogram_bucket{le="+Inf"} 1
+				some_histogram_sum 2.5
+				some_histogram_count 1
+
+			`,
+			observation: 2.5,
+		},
+		{
+			name: "Testing HistogramVec Collector",
+			c: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+				Name:    "some_histogram",
+				Help:    "An example of a histogram",
+				Buckets: []float64{1, 2, 3},
+			}, []string{"test"}),
+
+			metadata: `
+				# HELP some_histogram An example of a histogram
+				# TYPE some_histogram histogram
+			`,
+			expect: `
+				some_histogram_bucket{test="test",le="1"} 0
+				some_histogram_bucket{test="test",le="2"} 0
+				some_histogram_bucket{test="test",le="3"} 1
+				some_histogram_bucket{test="test",le="+Inf"} 1
+				some_histogram_sum{test="test"} 2.5
+				some_histogram_count{test="test"} 1
+
+			`,
+			observation: 2.5,
+		},
+	}
+
+	for _, input := range inputs {
+		switch collector := input.c.(type) {
+		case prometheus.Histogram:
+			collector.Observe(input.observation)
+		case *prometheus.HistogramVec:
+			collector.WithLabelValues("test").Observe(input.observation)
+		default:
+			t.Fatalf("unsuported collector tested")
+
+		}
+
+		t.Run(input.name, func(t *testing.T) {
+			if err := CollectAndCompare(input.c, strings.NewReader(input.metadata+input.expect)); err != nil {
+				t.Errorf("unexpected collecting result:\n%s", err)
+			}
+		})
+
+	}
+}
+
 func TestNoMetricFilter(t *testing.T) {
 	const metadata = `
 		# HELP some_total A value that represents a counter.
@@ -193,13 +290,11 @@ metric output does not match expectation; want:
 # TYPE some_other_metric counter
 some_other_metric{label1="value1"} 1

-
 got:

 # HELP some_total A value that represents a counter.
 # TYPE some_total counter
 some_total{label1="value1"} 1
-
 `

 	err := CollectAndCompare(c, strings.NewReader(metadata+expected))
@@ -208,6 +303,6 @@ some_total{label1="value1"} 1
 	}

 	if err.Error() != expectedError {
-		t.Errorf("Expected\n%#+v\nGot:\n%#+v\n", expectedError, err.Error())
+		t.Errorf("Expected\n%#+v\nGot:\n%#+v", expectedError, err.Error())
 	}
 }
@@ -39,13 +39,16 @@ func NewTimer(o Observer) *Timer {

 // ObserveDuration records the duration passed since the Timer was created with
 // NewTimer. It calls the Observe method of the Observer provided during
-// construction with the duration in seconds as an argument. ObserveDuration is
-// usually called with a defer statement.
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
 //
 // Note that this method is only guaranteed to never observe negative durations
 // if used with Go1.9+.
-func (t *Timer) ObserveDuration() {
+func (t *Timer) ObserveDuration() time.Duration {
+	d := time.Since(t.begin)
 	if t.observer != nil {
-		t.observer.Observe(time.Since(t.begin).Seconds())
+		t.observer.Observe(d.Seconds())
 	}
+	return d
 }
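Reviewer note (not part of the diff): with ObserveDuration now returning the elapsed time, callers can observe and also act on the value, instead of only deferring the call. A small usage sketch; the metric name and the simulated work are illustrative.

package main

import (
    "log"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    hist := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name: "task_duration_seconds",
        Help: "Duration of the example task.",
    })
    prometheus.MustRegister(hist)

    timer := prometheus.NewTimer(hist)
    time.Sleep(10 * time.Millisecond)  // The work being timed.
    elapsed := timer.ObserveDuration() // Observes into hist and returns the time.Duration.
    log.Printf("task took %v", elapsed)
}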
@@ -19,7 +19,7 @@ import (
 	"strings"
 	"testing"

-	"github.com/gogo/protobuf/proto"
+	"github.com/golang/protobuf/proto"

 	dto "github.com/prometheus/client_model/go"
 )
@@ -286,7 +286,7 @@ func TestWrap(t *testing.T) {
 			err = lReg.Register(tr.collector)
 		}
 		if tr.registrationFails && err == nil {
-			t.Fatalf("registration with wrapping registry unexpectedly succeded for collector #%d", i)
+			t.Fatalf("registration with wrapping registry unexpectedly succeeded for collector #%d", i)
 		}
 		if !tr.registrationFails && err != nil {
 			t.Fatalf("registration with wrapping registry failed for collector #%d: %s", i, err)
@@ -295,7 +295,7 @@ func TestWrap(t *testing.T) {
 		wantMF := toMetricFamilies(s.output...)
 		gotMF, err := reg.Gather()
 		if s.gatherFails && err == nil {
-			t.Fatal("gathering unexpectedly succeded")
+			t.Fatal("gathering unexpectedly succeeded")
 		}
 		if !s.gatherFails && err != nil {
 			t.Fatal("gathering failed:", err)