Merge branch 'main' into sparsehistogram

Ganesh Vernekar 2022-06-20 16:50:03 +05:30
commit 6ba7871ebb
No known key found for this signature in database
GPG Key ID: 0F8729A5EB59B965
44 changed files with 242 additions and 221 deletions

@@ -46,10 +46,6 @@ workflows:
   client_golang:
     jobs:
       # Refer to README.md for the currently supported versions.
-      - test:
-          name: go-1-16
-          go_version: "1.16"
-          run_lint: true
       - test:
          name: go-1-17
          go_version: "1.17"

.github/settings.yml (new file, 34 additions)

@ -0,0 +1,34 @@
---
branches:
- name: main
protection:
# Required. Require at least one approving review on a pull request, before merging. Set to null to disable.
required_pull_request_reviews:
# The number of approvals required. (1-6)
required_approving_review_count: 1
# Dismiss approved reviews automatically when a new commit is pushed.
dismiss_stale_reviews: false
# Blocks merge until code owners have reviewed.
require_code_owner_reviews: false
# Specify which users and teams can dismiss pull request reviews. Pass an empty dismissal_restrictions object to disable. User and team dismissal_restrictions are only available for organization-owned repositories. Omit this parameter for personal repositories.
dismissal_restrictions:
users: []
teams: []
# Required. Require status checks to pass before merging. Set to null to disable
required_status_checks:
# Required. Require branches to be up to date before merging.
strict: false
# Required. The list of status checks to require in order to merge into this branch
contexts:
- DCO
- "ci/circleci: go-1-17"
- "ci/circleci: go-1-18"
# Required. Enforce all configured restrictions for administrators. Set to true to enforce required status checks for repository administrators. Set to null to disable.
enforce_admins: false
# Prevent merge commits from being pushed to matching branches
required_linear_history: false
# Required. Restrict who can push to this branch. Team and user restrictions are only available for organization-owned repositories. Set to null to disable.
restrictions:
apps: []
users: []
teams: []

@@ -1,3 +1,4 @@
+---
 name: golangci-lint
 on:
   push:
@@ -25,6 +26,6 @@ jobs:
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@v3.1.0
+        uses: golangci/golangci-lint-action@v3.2.0
         with:
           version: v1.45.2

@@ -1,5 +1,31 @@
-# Run only staticcheck for now. Additional linters will be enabled one-by-one.
+---
+run:
+  deadline: 5m
+
+output:
+  sort-results: true
+
 linters:
   enable:
-  - staticcheck
-  disable-all: true
+    - gofumpt
+    - goimports
+    - revive
+    - misspell
+
+issues:
+  max-same-issues: 0
+  exclude-rules:
+    - path: _test.go
+      linters:
+        - errcheck
+        - govet
+        - structcheck
+
+linters-settings:
+  errcheck:
+    exclude: scripts/errcheck_excludes.txt
+  goimports:
+    local-prefixes: github.com/prometheus/client_golang
+  gofumpt:
+    extra-rules: true
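
The new configuration replaces the staticcheck-only setup with gofumpt, goimports (treating the module path as the local prefix), revive, and misspell; those linters drive most of the mechanical Go edits in the rest of this commit. A minimal, hypothetical sketch of the style they converge on (not a file in this repository):

    package main

    import (
        "fmt"
        "time"

        // With goimports local-prefixes, the module's own packages form the last group.
        "github.com/prometheus/client_golang/prometheus"
    )

    // gofumpt favours the forms this diff converges on: parameters of the same
    // type combined, n++ instead of n += 1, and "for k := range m" without the
    // unused value.
    func window(start, end time.Time) time.Duration { return end.Sub(start) }

    func main() {
        c := prometheus.NewCounter(prometheus.CounterOpts{Name: "example_total", Help: "Example counter."})
        c.Inc()
        fmt.Println(window(time.Now(), time.Now().Add(time.Second)))
    }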

@@ -36,29 +36,6 @@ GO_VERSION        ?= $(shell $(GO) version)
 GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
-	ifneq (,$(wildcard go.mod))
-		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
-		GO111MODULE := on
-		ifneq (,$(wildcard vendor))
-			# Always use the local vendor/ directory to satisfy the dependencies.
-			GOOPTS := $(GOOPTS) -mod=vendor
-		endif
-	endif
-else
-	ifneq (,$(wildcard go.mod))
-		ifneq (,$(wildcard vendor))
-			$(warning This repository requires Go >= 1.11 because of Go modules)
-			$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
-		endif
-	else
-		# This repository isn't using Go modules (yet).
-		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
-	endif
-endif
 PROMU := $(FIRST_GOPATH)/bin/promu
 pkgs   = ./...
@@ -150,11 +127,7 @@ common-check_license:
 .PHONY: common-deps
 common-deps:
 	@echo ">> getting dependencies"
-ifdef GO111MODULE
-	GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
-	$(GO) get $(GOOPTS) -t ./...
-endif
+	$(GO) mod download
 
 .PHONY: update-go-deps
 update-go-deps:
@@ -162,20 +135,17 @@ update-go-deps:
 	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
 		$(GO) get -d $$m; \
 	done
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
+	$(GO) mod tidy
 
 .PHONY: common-test-short
 common-test-short: $(GOTEST_DIR)
 	@echo ">> running short tests"
-	GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
+	$(GOTEST) -short $(GOOPTS) $(pkgs)
 
 .PHONY: common-test
 common-test: $(GOTEST_DIR)
 	@echo ">> running all tests"
-	GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
 
 $(GOTEST_DIR):
 	@mkdir -p $@
@@ -183,25 +153,21 @@ $(GOTEST_DIR):
 .PHONY: common-format
 common-format:
 	@echo ">> formatting code"
-	GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+	$(GO) fmt $(pkgs)
 
 .PHONY: common-vet
 common-vet:
 	@echo ">> vetting code"
-	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+	$(GO) vet $(GOOPTS) $(pkgs)
 
 .PHONY: common-lint
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
 	@echo ">> running golangci-lint"
-ifdef GO111MODULE
 # 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
 # Otherwise staticcheck might fail randomly for some reason not yet explained.
-	GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
-	GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
-	$(GOLANGCI_LINT) run $(pkgs)
-endif
+	$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif
 
 .PHONY: common-yamllint
@@ -218,28 +184,15 @@ endif
 common-staticcheck: lint
 
 .PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
-	@echo ">> running check for unused packages"
-	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
 	@echo ">> running check for unused/missing packages in go.mod"
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+	$(GO) mod tidy
 	@git diff --exit-code -- go.sum go.mod
-else
-	@echo ">> running check for unused packages in vendor/"
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-	@git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
 
 .PHONY: common-build
 common-build: promu
 	@echo ">> building binaries"
-	GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
 
 .PHONY: common-tarball
 common-tarball: promu
@@ -295,12 +248,6 @@ $(GOLANGCI_LINT):
 		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 endif
 
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
-	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
 .PHONY: precheck
 precheck::

@@ -10,7 +10,7 @@ This is the [Go](http://golang.org) client library for
 instrumenting application code, and one for creating clients that talk to the
 Prometheus HTTP API.
 
-__This library requires Go1.16 or later.__
+__This library requires Go1.17 or later.__
 
 ## Important note about releases and stability

@ -134,7 +134,6 @@ func BenchmarkClient(b *testing.B) {
for _, sizeKB := range []int{4, 50, 1000, 2000} { for _, sizeKB := range []int{4, 50, 1000, 2000} {
b.Run(fmt.Sprintf("%dKB", sizeKB), func(b *testing.B) { b.Run(fmt.Sprintf("%dKB", sizeKB), func(b *testing.B) {
testServer := httptest.NewServer(serveSpaces{sizeKB}) testServer := httptest.NewServer(serveSpaces{sizeKB})
defer testServer.Close() defer testServer.Close()

@@ -109,7 +109,6 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) {
 	stream.WriteRaw(`"`)
 	stream.WriteArrayEnd()
-
 }
 
 func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
@@ -230,25 +229,25 @@ type API interface {
 	// Config returns the current Prometheus configuration.
 	Config(ctx context.Context) (ConfigResult, error)
 	// DeleteSeries deletes data for a selection of series in a time range.
-	DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error
+	DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error
 	// Flags returns the flag values that Prometheus was launched with.
 	Flags(ctx context.Context) (FlagsResult, error)
 	// LabelNames returns the unique label names present in the block in sorted order by given time range and matchers.
-	LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error)
+	LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error)
 	// LabelValues performs a query for the values of the given label, time range and matchers.
-	LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error)
+	LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error)
 	// Query performs a query for the given time.
 	Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error)
 	// QueryRange performs a query for the given range.
 	QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error)
 	// QueryExemplars performs a query for exemplars by the given query and time range.
-	QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error)
+	QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error)
 	// Buildinfo returns various build information properties about the Prometheus server
 	Buildinfo(ctx context.Context) (BuildinfoResult, error)
 	// Runtimeinfo returns the various runtime information properties about the Prometheus server.
 	Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error)
 	// Series finds series by label matchers.
-	Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error)
+	Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error)
 	// Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
 	// under the TSDB's data directory and returns the directory as response.
 	Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
@@ -257,9 +256,9 @@ type API interface {
 	// Targets returns an overview of the current state of the Prometheus target discovery.
 	Targets(ctx context.Context) (TargetsResult, error)
 	// TargetsMetadata returns metadata about metrics currently scraped by the target.
-	TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error)
+	TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error)
 	// Metadata returns metadata about metrics currently scraped by the metric name.
-	Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error)
+	Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error)
 	// TSDB returns the cardinality statistics.
 	TSDB(ctx context.Context) (TSDBResult, error)
 	// WalReplay returns the current replay status of the wal.
@@ -699,7 +698,7 @@ func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) {
 	return res, json.Unmarshal(body, &res)
 }
 
-func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
+func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error {
 	u := h.client.URL(epDeleteSeries, nil)
 	q := u.Query()
@@ -772,7 +771,7 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) {
 	return res, json.Unmarshal(body, &res)
 }
 
-func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error) {
+func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) {
 	u := h.client.URL(epLabels, nil)
 	q := u.Query()
 	q.Set("start", formatTime(startTime))
@@ -795,7 +794,7 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime ti
 	return labelNames, w, json.Unmarshal(body, &labelNames)
 }
 
-func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) {
+func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) {
 	u := h.client.URL(epLabelValues, map[string]string{"name": label})
 	q := u.Query()
 	q.Set("start", formatTime(startTime))
@@ -833,7 +832,6 @@ func WithTimeout(timeout time.Duration) Option {
 }
 
 func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) {
-
 	u := h.client.URL(epQuery, nil)
 	q := u.Query()
@@ -890,7 +888,7 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ..
 	return model.Value(qres.v), warnings, json.Unmarshal(body, &qres)
 }
 
-func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) {
+func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) {
 	u := h.client.URL(epSeries, nil)
 	q := u.Query()
@@ -973,7 +971,7 @@ func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
 	return res, json.Unmarshal(body, &res)
 }
 
-func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) {
+func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error) {
 	u := h.client.URL(epTargetsMetadata, nil)
 	q := u.Query()
@@ -997,7 +995,7 @@ func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metri
 	return res, json.Unmarshal(body, &res)
 }
 
-func (h *httpAPI) Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) {
+func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) {
 	u := h.client.URL(epMetadata, nil)
 	q := u.Query()
@@ -1054,7 +1052,7 @@ func (h *httpAPI) WalReplay(ctx context.Context) (WalReplayStatus, error) {
 	return res, json.Unmarshal(body, &res)
 }
 
-func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) {
+func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error) {
 	u := h.client.URL(epQueryExemplars, nil)
 	q := u.Query()
@@ -1162,7 +1160,6 @@ func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Respon
 	}
 
 	return resp, []byte(result.Data), result.Warnings, err
-
 }
 
 // DoGetFallback will attempt to do the request as-is, and on a 405 or 501 it
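
For orientation, a minimal sketch of calling this v1 API client with the simplified signatures shown above; the server address is a placeholder:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/prometheus/client_golang/api"
        v1 "github.com/prometheus/client_golang/api/prometheus/v1"
    )

    func main() {
        // Address is a placeholder; point it at a reachable Prometheus server.
        client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
        if err != nil {
            panic(err)
        }
        v1api := v1.NewAPI(client)

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // Query matches the interface above; the variadic opts can be omitted.
        result, warnings, err := v1api.Query(ctx, "up", time.Now())
        if err != nil {
            panic(err)
        }
        if len(warnings) > 0 {
            fmt.Println("warnings:", warnings)
        }
        fmt.Println(result)
    }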

@@ -65,7 +65,6 @@ func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL {
 }
 
 func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) {
-
 	test := c.curTest
 
 	if req.URL.Path != test.reqPath {
@@ -101,7 +100,6 @@ func (c *apiTestClient) DoGetFallback(ctx context.Context, u *url.URL, args url.
 }
 
 func TestAPIs(t *testing.T) {
-
 	testTime := time.Now()
 
 	tc := &apiTestClient{
@@ -131,7 +129,7 @@ func TestAPIs(t *testing.T) {
 		}
 	}
 
-	doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) {
+	doDeleteSeries := func(matcher string, startTime, endTime time.Time) func() (interface{}, Warnings, error) {
 		return func() (interface{}, Warnings, error) {
 			return nil, nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime)
 		}
@@ -182,7 +180,7 @@ func TestAPIs(t *testing.T) {
 		}
 	}
 
-	doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) {
+	doSeries := func(matcher string, startTime, endTime time.Time) func() (interface{}, Warnings, error) {
 		return func() (interface{}, Warnings, error) {
 			return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime)
 		}
@@ -209,14 +207,14 @@ func TestAPIs(t *testing.T) {
 		}
 	}
 
-	doTargetsMetadata := func(matchTarget string, metric string, limit string) func() (interface{}, Warnings, error) {
+	doTargetsMetadata := func(matchTarget, metric, limit string) func() (interface{}, Warnings, error) {
 		return func() (interface{}, Warnings, error) {
 			v, err := promAPI.TargetsMetadata(context.Background(), matchTarget, metric, limit)
 			return v, nil, err
 		}
 	}
 
-	doMetadata := func(metric string, limit string) func() (interface{}, Warnings, error) {
+	doMetadata := func(metric, limit string) func() (interface{}, Warnings, error) {
 		return func() (interface{}, Warnings, error) {
 			v, err := promAPI.Metadata(context.Background(), metric, limit)
 			return v, nil, err
@@ -237,7 +235,7 @@ func TestAPIs(t *testing.T) {
 		}
 	}
 
-	doQueryExemplars := func(query string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) {
+	doQueryExemplars := func(query string, startTime, endTime time.Time) func() (interface{}, Warnings, error) {
 		return func() (interface{}, Warnings, error) {
 			v, err := promAPI.QueryExemplars(context.Background(), query, startTime, endTime)
 			return v, nil, err
@@ -471,7 +469,8 @@ func TestAPIs(t *testing.T) {
 				{
 					"__name__": "up",
 					"job":      "prometheus",
-					"instance": "localhost:9090"},
+					"instance": "localhost:9090",
+				},
 			},
 			reqMethod: "GET",
 			reqPath:   "/api/v1/series",
@@ -495,7 +494,8 @@ func TestAPIs(t *testing.T) {
 				{
 					"__name__": "up",
 					"job":      "prometheus",
-					"instance": "localhost:9090"},
+					"instance": "localhost:9090",
+				},
 			},
 			inWarnings: []string{"a"},
 			reqMethod:  "GET",
@@ -586,7 +586,8 @@ func TestAPIs(t *testing.T) {
 				{
 					"__name__": "up",
 					"job":      "prometheus",
-					"instance": "localhost:9090"},
+					"instance": "localhost:9090",
+				},
 			},
 			reqMethod: "POST",
 			reqPath:   "/api/v1/admin/tsdb/delete_series",
@@ -1115,7 +1116,7 @@ func TestAPIs(t *testing.T) {
 				"limit": []string{"1"},
 			},
 			res: map[string][]Metadata{
-				"go_goroutines": []Metadata{
+				"go_goroutines": {
 					{
 						Type: "gauge",
 						Help: "Number of goroutines that currently exist.",
@@ -1523,7 +1524,6 @@ func TestAPIClientDo(t *testing.T) {
 	for i, test := range tests {
 		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
-
 			tc.ch <- test
 
 			_, body, warnings, err := client.Do(context.Background(), tc.req)
@@ -1564,7 +1564,6 @@ func TestAPIClientDo(t *testing.T) {
 				t.Fatalf("expected body :%v, but got:%v", test.expectedBody, string(body))
 			}
-
 		})
 	}
 }

@@ -22,9 +22,10 @@ import (
 	"os"
 	"time"
 
+	"github.com/prometheus/common/config"
+
 	"github.com/prometheus/client_golang/api"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
-	"github.com/prometheus/common/config"
 )
 
 func ExampleAPI_query() {

go.mod (20 changes)

@@ -1,19 +1,31 @@
 module github.com/prometheus/client_golang
 
+go 1.17
+
 require (
 	github.com/beorn7/perks v1.0.1
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/davecgh/go-spew v1.1.1
 	github.com/golang/protobuf v1.5.2
-	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12
 	github.com/prometheus/client_model v0.2.1-0.20210624201024-61b6c1aac064
 	github.com/prometheus/common v0.34.0
 	github.com/prometheus/procfs v0.7.3
-	golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886
+	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
 	google.golang.org/protobuf v1.28.0
 )
 
-exclude github.com/prometheus/client_golang v1.12.1
+require (
+	github.com/jpillora/backoff v1.0.0 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
+	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+	golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
+	golang.org/x/text v0.3.7 // indirect
+	google.golang.org/appengine v1.6.6 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+)
 
-go 1.16
+exclude github.com/prometheus/client_golang v1.12.1

go.sum (4 changes)

@@ -262,8 +262,8 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 h1:eJv7u3ksNXoLbGSKuv2s/SIO4tJVxc/A+MTpzxDgz/Q=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@ -30,7 +30,6 @@ func (c collectorDescribedByCollect) Describe(ch chan<- *Desc) {
} }
func TestDescribeByCollect(t *testing.T) { func TestDescribeByCollect(t *testing.T) {
goodCollector := collectorDescribedByCollect{ goodCollector := collectorDescribedByCollect{
cnt: NewCounter(CounterOpts{Name: "c1", Help: "help c1"}), cnt: NewCounter(CounterOpts{Name: "c1", Help: "help c1"}),
gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}), gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}),

@@ -72,7 +72,8 @@ const (
 //
 // The current default is GoRuntimeMemStatsCollection, so the compatibility mode with
 // client_golang pre v1.12 (move to runtime/metrics).
-func WithGoCollections(flags GoCollectionOption) goOption {
+//nolint:staticcheck // Ignore SA1019 until v2.
+func WithGoCollections(flags GoCollectionOption) func(options *prometheus.GoCollectorOptions) {
 	return func(o *goOptions) {
 		o.EnabledCollections = uint32(flags)
 	}
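
A hedged usage sketch for this option, assuming collectors.NewGoCollector still accepts these option functions as it did around this release (constant name taken from the comment above):

    package main

    import (
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/collectors"
    )

    func main() {
        reg := prometheus.NewRegistry()
        // WithGoCollections selects which Go runtime metric sets the collector exposes;
        // GoRuntimeMemStatsCollection is the pre-v1.12-compatible default mentioned above.
        reg.MustRegister(collectors.NewGoCollector(
            collectors.WithGoCollections(collectors.GoRuntimeMemStatsCollection),
        ))
    }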

@@ -231,7 +231,7 @@ func TestCounterExemplar(t *testing.T) {
 	}
 	expectedExemplar := &dto.Exemplar{
 		Label: []*dto.LabelPair{
-			&dto.LabelPair{Name: proto.String("foo"), Value: proto.String("bar")},
+			{Name: proto.String("foo"), Value: proto.String("bar")},
 		},
 		Value:     proto.Float64(42),
 		Timestamp: ts,
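
The exemplar asserted here is attached through the ExemplarAdder interface; a minimal sketch using the same label pair as the test:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        c := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Total HTTP requests.",
        })
        // The concrete counter implements ExemplarAdder, so an exemplar (for
        // instance a trace ID) can be attached to an increment.
        if ea, ok := c.(prometheus.ExemplarAdder); ok {
            ea.AddWithExemplar(42, prometheus.Labels{"foo": "bar"})
        }
    }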

@@ -20,6 +20,7 @@ import (
 	"strings"
 
 	"github.com/cespare/xxhash/v2"
+	"github.com/prometheus/client_golang/prometheus/internal"
 
 	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.

@ -108,7 +108,6 @@ func (v *InfoVec) MustCurryWith(labels prometheus.Labels) *InfoVec {
} }
func ExampleMetricVec() { func ExampleMetricVec() {
infoVec := NewInfoVec( infoVec := NewInfoVec(
"library_version_info", "library_version_info",
"Versions of the libraries used in this binary.", "Versions of the libraries used in this binary.",

@ -19,27 +19,25 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
var ( // apiRequestDuration tracks the duration separate for each HTTP status
// apiRequestDuration tracks the duration separate for each HTTP status // class (1xx, 2xx, ...). This creates a fair amount of time series on
// class (1xx, 2xx, ...). This creates a fair amount of time series on // the Prometheus server. Usually, you would track the duration of
// the Prometheus server. Usually, you would track the duration of // serving HTTP request without partitioning by outcome. Do something
// serving HTTP request without partitioning by outcome. Do something // like this only if needed. Also note how only status classes are
// like this only if needed. Also note how only status classes are // tracked, not every single status code. The latter would create an
// tracked, not every single status code. The latter would create an // even larger amount of time series. Request counters partitioned by
// even larger amount of time series. Request counters partitioned by // status code are usually OK as each counter only creates one time
// status code are usually OK as each counter only creates one time // series. Histograms are way more expensive, so partition with care and
// series. Histograms are way more expensive, so partition with care and // only where you really need separate latency tracking. Partitioning by
// only where you really need separate latency tracking. Partitioning by // status class is only an example. In concrete cases, other partitions
// status class is only an example. In concrete cases, other partitions // might make more sense.
// might make more sense. var apiRequestDuration = prometheus.NewHistogramVec(
apiRequestDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{
prometheus.HistogramOpts{ Name: "api_request_duration_seconds",
Name: "api_request_duration_seconds", Help: "Histogram for the request duration of the public API, partitioned by status class.",
Help: "Histogram for the request duration of the public API, partitioned by status class.", Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5), },
}, []string{"status_class"},
[]string{"status_class"},
)
) )
func handler(w http.ResponseWriter, r *http.Request) { func handler(w http.ResponseWriter, r *http.Request) {

@ -19,17 +19,15 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
var ( // If a function is called rarely (i.e. not more often than scrapes
// If a function is called rarely (i.e. not more often than scrapes // happen) or ideally only once (like in a batch job), it can make sense
// happen) or ideally only once (like in a batch job), it can make sense // to use a Gauge for timing the function call. For timing a batch job
// to use a Gauge for timing the function call. For timing a batch job // and pushing the result to a Pushgateway, see also the comprehensive
// and pushing the result to a Pushgateway, see also the comprehensive // example in the push package.
// example in the push package. var funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "example_function_duration_seconds",
Name: "example_function_duration_seconds", Help: "Duration of the last call of an example function.",
Help: "Duration of the last call of an example function.", })
})
)
func run() error { func run() error {
// The Set method of the Gauge is used to observe the duration. // The Set method of the Gauge is used to observe the duration.

@ -20,13 +20,11 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
var ( var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "example_request_duration_seconds",
Name: "example_request_duration_seconds", Help: "Histogram for the runtime of a simple example function.",
Help: "Histogram for the runtime of a simple example function.", Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
Buckets: prometheus.LinearBuckets(0.01, 0.01, 10), })
})
)
func ExampleTimer() { func ExampleTimer() {
// timer times this example function. It uses a Histogram, but a Summary // timer times this example function. It uses a Histogram, but a Summary

@@ -54,7 +54,8 @@ const (
 )
 
 // runtime/metrics names required for runtimeMemStats like logic.
-var rmForMemStats = []string{goGCHeapTinyAllocsObjects,
+var rmForMemStats = []string{
+	goGCHeapTinyAllocsObjects,
 	goGCHeapAllocsObjects,
 	goGCHeapFreesObjects,
 	goGCHeapAllocsBytes,

@@ -240,9 +240,8 @@ func writeMetric(buf *bufio.Writer, m model.Metric, useTags bool) error {
 		}
 		if useTags {
 			return writeTags(buf, m)
-		} else {
-			return writeLabels(buf, m, numLabels)
 		}
+		return writeLabels(buf, m, numLabels)
 	}
 	return nil
 }

@ -106,8 +106,8 @@ func NewMatcher(a, b []string) *SequenceMatcher {
} }
func NewMatcherWithJunk(a, b []string, autoJunk bool, func NewMatcherWithJunk(a, b []string, autoJunk bool,
isJunk func(string) bool) *SequenceMatcher { isJunk func(string) bool,
) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b) m.SetSeqs(a, b)
return &m return &m
@ -163,12 +163,12 @@ func (m *SequenceMatcher) chainB() {
m.bJunk = map[string]struct{}{} m.bJunk = map[string]struct{}{}
if m.IsJunk != nil { if m.IsJunk != nil {
junk := m.bJunk junk := m.bJunk
for s, _ := range b2j { for s := range b2j {
if m.IsJunk(s) { if m.IsJunk(s) {
junk[s] = struct{}{} junk[s] = struct{}{}
} }
} }
for s, _ := range junk { for s := range junk {
delete(b2j, s) delete(b2j, s)
} }
} }
@ -183,7 +183,7 @@ func (m *SequenceMatcher) chainB() {
popular[s] = struct{}{} popular[s] = struct{}{}
} }
} }
for s, _ := range popular { for s := range popular {
delete(b2j, s) delete(b2j, s)
} }
} }
@ -270,7 +270,7 @@ func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
for besti+bestsize < ahi && bestj+bestsize < bhi && for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) && !m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] { m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1 bestsize++
} }
// Now that we have a wholly interesting match (albeit possibly // Now that we have a wholly interesting match (albeit possibly
@ -287,7 +287,7 @@ func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
for besti+bestsize < ahi && bestj+bestsize < bhi && for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) && m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] { m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1 bestsize++
} }
return Match{A: besti, B: bestj, Size: bestsize} return Match{A: besti, B: bestj, Size: bestsize}
@ -439,8 +439,10 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
// End the current group and start a new one whenever // End the current group and start a new one whenever
// there is a large range with no changes. // there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn { if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), group = append(group, OpCode{
j1, min(j2, j1+n)}) c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n),
})
groups = append(groups, group) groups = append(groups, group)
group = []OpCode{} group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n) i1, j1 = max(i1, i2-n), max(j1, j2-n)
@ -498,7 +500,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
} }
avail[s] = n - 1 avail[s] = n - 1
if n > 0 { if n > 0 {
matches += 1 matches++
} }
} }
return calculateRatio(matches, len(m.a)+len(m.b)) return calculateRatio(matches, len(m.a)+len(m.b))
@ -522,7 +524,7 @@ func formatRangeUnified(start, stop int) string {
return fmt.Sprintf("%d", beginning) return fmt.Sprintf("%d", beginning)
} }
if length == 0 { if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range beginning-- // empty ranges begin at line just before the range
} }
return fmt.Sprintf("%d,%d", beginning, length) return fmt.Sprintf("%d,%d", beginning, length)
} }
@ -637,7 +639,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{} w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff) err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err return w.String(), err
} }
// Split a string on "\n" while preserving them. The output can be used // Split a string on "\n" while preserving them. The output can be used

@ -58,7 +58,7 @@ func TestGetOptCodes(t *testing.T) {
fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag), fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag),
op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2]) op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2])
} }
result := string(w.Bytes()) result := w.String()
expected := `d a[0:1], (q) b[0:0] () expected := `d a[0:1], (q) b[0:0] ()
e a[1:3], (ab) b[0:2] (ab) e a[1:3], (ab) b[0:2] (ab)
r a[3:4], (x) b[2:3] (y) r a[3:4], (x) b[2:3] (y)
@ -93,7 +93,7 @@ func TestGroupedOpCodes(t *testing.T) {
op.I1, op.I2, op.J1, op.J2) op.I1, op.I2, op.J1, op.J2)
} }
} }
result := string(w.Bytes()) result := w.String()
expected := `group expected := `group
e, 5, 8, 5, 8 e, 5, 8, 5, 8
i, 8, 8, 8, 9 i, 8, 8, 8, 9
@ -185,14 +185,14 @@ func TestWithAsciiBJunk(t *testing.T) {
sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}}) assertEqual(t, sm.bJunk, map[string]struct{}{" ": {}})
isJunk = func(s string) bool { isJunk = func(s string) bool {
return s == " " || s == "b" return s == " " || s == "b"
} }
sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)), sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk) splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}}) assertEqual(t, sm.bJunk, map[string]struct{}{" ": {}, "b": {}})
} }
func TestSFBugsRatioForNullSeqn(t *testing.T) { func TestSFBugsRatioForNullSeqn(t *testing.T) {

@ -75,5 +75,4 @@ func TestWithExemplarsMetric(t *testing.T) {
} }
} }
}) })
} }

@@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
 	return n, err
 }
 
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
+type (
+	closeNotifierDelegator struct{ *responseWriterDelegator }
+	flusherDelegator       struct{ *responseWriterDelegator }
+	hijackerDelegator      struct{ *responseWriterDelegator }
+	readerFromDelegator    struct{ *responseWriterDelegator }
+	pusherDelegator        struct{ *responseWriterDelegator }
+)
 
 func (d closeNotifierDelegator) CloseNotify() <-chan bool {
 	//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
+
 func (d flusherDelegator) Flush() {
 	// If applicable, call WriteHeader here so that observeWriteHeader is
 	// handled appropriately.
@@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() {
 	}
 	d.ResponseWriter.(http.Flusher).Flush()
 }
+
 func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	return d.ResponseWriter.(http.Hijacker).Hijack()
 }
+
 func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	// If applicable, call WriteHeader here so that observeWriteHeader is
 	// handled appropriately.
@@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	d.written += n
 	return n, err
 }
+
 func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
 	return d.ResponseWriter.(http.Pusher).Push(target, opts)
 }
@@ -261,7 +267,7 @@ func init() {
 			http.Flusher
 		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
 	}
-	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
 		return struct {
 			*responseWriterDelegator
 			http.Pusher

@@ -24,8 +24,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
 )
 
 type errorCollector struct{}
@@ -246,7 +246,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
 // Collector does not have a Desc or has more than one Desc or its Desc is
 // invalid. It also panics if the Collector has any non-const, non-curried
 // labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
+func checkLabels(c prometheus.Collector) (code, method bool) {
 	// TODO(beorn7): Remove this hacky way to check for instance labels
 	// once Descriptors can have their dimensionality queried.
 	var (

@@ -279,7 +279,7 @@ func TestLabels(t *testing.T) {
 			ok: false,
 		},
 	}
-	checkLabels := func(labels []string) (gotCode bool, gotMethod bool) {
+	checkLabels := func(labels []string) (gotCode, gotMethod bool) {
 		for _, label := range labels {
 			switch label {
 			case "code":

@@ -50,11 +50,10 @@ func ExampleInstrumentHandlerWithExtraMethods() {
 	// Instrument the handlers with all the metrics, injecting the "handler"
 	// label by currying.
-	pullChain :=
-		InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}),
-			InstrumentHandlerCounter(counter, pullHandler, opts),
-			opts,
-		)
+	pullChain := InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}),
+		InstrumentHandlerCounter(counter, pullHandler, opts),
+		opts,
+	)
 
 	http.Handle("/metrics", Handler())
 	http.Handle("/pull", pullChain)

@@ -98,9 +98,7 @@ func New(url, job string) *Pusher {
 	if !strings.Contains(url, "://") {
 		url = "http://" + url
 	}
-	if strings.HasSuffix(url, "/") {
-		url = url[:len(url)-1]
-	}
+	url = strings.TrimSuffix(url, "/")
 
 	return &Pusher{
 		error: err,
@@ -273,7 +271,11 @@ func (p *Pusher) push(ctx context.Context, method string) error {
 				}
 			}
 		}
-		enc.Encode(mf)
+		if err := enc.Encode(mf); err != nil {
+			return fmt.Errorf(
+				"failed to encode metric familty %s, error is %w",
+				mf.GetName(), err)
+		}
 	}
 
 	req, err := http.NewRequestWithContext(ctx, method, p.fullURL(), buf)
 	if err != nil {
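
A short usage sketch of the Pusher built by New above (the Pushgateway address is a placeholder):

    package main

    import (
        "log"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/push"
    )

    func main() {
        completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "db_backup_last_completion_timestamp_seconds",
            Help: "The timestamp of the last successful completion of a DB backup.",
        })
        completionTime.SetToCurrentTime()

        // New trims a trailing "/" from the URL (per the hunk above) and defaults
        // to http:// when no scheme is given.
        if err := push.New("http://pushgateway:9091/", "db_backup").
            Collector(completionTime).
            Push(); err != nil {
            log.Fatal("Could not push completion time to Pushgateway:", err)
        }
    }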

@ -26,7 +26,6 @@ import (
) )
func TestPush(t *testing.T) { func TestPush(t *testing.T) {
var ( var (
lastMethod string lastMethod string
lastBody []byte lastBody []byte

@@ -582,7 +582,7 @@ func WriteToTextfile(filename string, g Gatherer) error {
 		return err
 	}
 
-	if err := os.Chmod(tmp.Name(), 0644); err != nil {
+	if err := os.Chmod(tmp.Name(), 0o644); err != nil {
 		return err
 	}
 	return os.Rename(tmp.Name(), filename)

@@ -349,7 +349,7 @@ collected metric "broken_metric" { label:<name:"foo" value:"bar" > label:<name:"
 		body []byte
 	}
 
-	var scenarios = []struct {
+	scenarios := []struct {
 		headers   map[string]string
 		out       output
 		collector prometheus.Collector
@@ -1118,7 +1117,6 @@ func (m *collidingCollector) Collect(metric chan<- prometheus.Metric) {
 // TestAlreadyRegistered will fail with the old, weaker hash function. It is
 // taken from https://play.golang.org/p/HpV7YE6LI_4 , authored by @awilliams.
 func TestAlreadyRegisteredCollision(t *testing.T) {
-
 	reg := prometheus.NewRegistry()
 
 	for i := 0; i < 10000; i++ {

@@ -283,7 +283,7 @@ func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
 
 // metricUnits attempts to detect known unit types used as part of a metric name,
 // e.g. "foo_bytes_total" or "bar_baz_milligrams".
-func metricUnits(m string) (unit string, base string, ok bool) {
+func metricUnits(m string) (unit, base string, ok bool) {
 	ss := strings.Split(m, "_")
 
 	for unit, base := range units {

@ -330,7 +330,8 @@ thermometers_rankine 10
Metric: "thermometers_rankine", Metric: "thermometers_rankine",
Text: `use base unit "celsius" instead of "rankine"`, Text: `use base unit "celsius" instead of "rankine"`,
}}, }},
}, { },
{
name: "inches", name: "inches",
in: ` in: `
# HELP x_inches Test metric. # HELP x_inches Test metric.
@ -341,7 +342,8 @@ x_inches 10
Metric: "x_inches", Metric: "x_inches",
Text: `use base unit "meters" instead of "inches"`, Text: `use base unit "meters" instead of "inches"`,
}}, }},
}, { },
{
name: "yards", name: "yards",
in: ` in: `
# HELP x_yards Test metric. # HELP x_yards Test metric.
@ -352,7 +354,8 @@ x_yards 10
Metric: "x_yards", Metric: "x_yards",
Text: `use base unit "meters" instead of "yards"`, Text: `use base unit "meters" instead of "yards"`,
}}, }},
}, { },
{
name: "miles", name: "miles",
in: ` in: `
# HELP x_miles Test metric. # HELP x_miles Test metric.
@ -363,7 +366,8 @@ x_miles 10
Metric: "x_miles", Metric: "x_miles",
Text: `use base unit "meters" instead of "miles"`, Text: `use base unit "meters" instead of "miles"`,
}}, }},
}, { },
{
name: "bits", name: "bits",
in: ` in: `
# HELP x_bits Test metric. # HELP x_bits Test metric.

@@ -101,7 +101,9 @@ func ToFloat64(c prometheus.Collector) float64 {
 	}
 
 	pb := &dto.Metric{}
-	m.Write(pb)
+	if err := m.Write(pb); err != nil {
+		panic(fmt.Errorf("error happened while collecting metrics: %w", err))
+	}
 	if pb.Gauge != nil {
 		return pb.Gauge.GetValue()
 	}
@@ -221,7 +223,7 @@ func compare(got, want []*dto.MetricFamily) error {
 
 // diff returns a diff of both values as long as both are of the same type and
 // are a struct, map, slice, array or string. Otherwise it returns an empty string.
-func diff(expected interface{}, actual interface{}) string {
+func diff(expected, actual interface{}) string {
 	if expected == nil || actual == nil {
 		return ""
 	}
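
A minimal sketch of ToFloat64 in a test, the function whose Write error is now surfaced above (package name is hypothetical):

    package mypkg_test

    import (
        "testing"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/testutil"
    )

    func TestCounterValue(t *testing.T) {
        c := prometheus.NewCounter(prometheus.CounterOpts{Name: "jobs_done_total", Help: "Jobs done."})
        c.Add(3)

        // ToFloat64 collects the single metric and returns its value; after the
        // change above it panics instead of silently ignoring a Write error.
        if got := testutil.ToFloat64(c); got != 3 {
            t.Errorf("want 3, got %v", got)
        }
    }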

@ -148,5 +148,4 @@ func TestTimerByOutcome(t *testing.T) {
if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got { if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
} }
} }

@@ -21,9 +21,10 @@ import (
 	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/client_golang/prometheus/internal"
 	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
 )
@@ -571,7 +571,7 @@ func findMetricWithLabels(
 	return len(metrics)
 }
 
-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
 	if len(values) != len(lvs)+len(curry) {
 		return false
 	}

@ -49,7 +49,7 @@ func testDelete(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
@ -57,7 +57,7 @@ func testDelete(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
@ -65,7 +65,7 @@ func testDelete(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
@ -103,8 +103,8 @@ func testDeleteLabelValues(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision. vec.With(Labels{"l1": "v1", "l2": "v3"}).Set(42) // Add junk data for collision.
if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
@ -115,7 +115,7 @@ func testDeleteLabelValues(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
// Delete out of order. // Delete out of order.
if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
@ -146,9 +146,9 @@ func TestDeletePartialMatch(t *testing.T) {
} }
baseVec.With(Labels{"l1": "baseValue1", "l2": "baseValue2", "l3": "baseValue3"}).Inc() baseVec.With(Labels{"l1": "baseValue1", "l2": "baseValue2", "l3": "baseValue3"}).Inc()
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff1BaseValue2", "l3": "v3"}).(Gauge).Set(42) baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff1BaseValue2", "l3": "v3"}).Set(42)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff2BaseValue2", "l3": "v3"}).(Gauge).Set(84) baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff2BaseValue2", "l3": "v3"}).Set(84)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff3BaseValue2", "l3": "v3"}).(Gauge).Set(168) baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff3BaseValue2", "l3": "v3"}).Set(168)
curriedVec := baseVec.MustCurryWith(Labels{"l2": "curriedValue2"}) curriedVec := baseVec.MustCurryWith(Labels{"l2": "curriedValue2"})
curriedVec.WithLabelValues("curriedValue1", "curriedValue3").Inc() curriedVec.WithLabelValues("curriedValue1", "curriedValue3").Inc()
@ -249,7 +249,7 @@ func testMetricVec(t *testing.T, vec *GaugeVec) {
vec.WithLabelValues(pair[0], pair[1]).Inc() vec.WithLabelValues(pair[0], pair[1]).Inc()
expected[[2]string{"v1", "v2"}]++ expected[[2]string{"v1", "v2"}]++
vec.WithLabelValues("v1", "v2").(Gauge).Inc() vec.WithLabelValues("v1", "v2").Inc()
} }
var total int var total int
@ -351,7 +351,6 @@ func TestCurryVecWithCollisions(t *testing.T) {
} }
func testCurryVec(t *testing.T, vec *CounterVec) { func testCurryVec(t *testing.T, vec *CounterVec) {
assertMetrics := func(t *testing.T) { assertMetrics := func(t *testing.T) {
n := 0 n := 0
for _, m := range vec.metricMap.metrics { for _, m := range vec.metricMap.metrics {
@ -538,7 +537,6 @@ func testCurryVec(t *testing.T, vec *CounterVec) {
} else if err.Error() != `label name "three" is already curried` { } else if err.Error() != `label name "three" is already curried` {
t.Error("currying returned unexpected error:", err) t.Error("currying returned unexpected error:", err)
} }
}) })
t.Run("unknown label", func(t *testing.T) { t.Run("unknown label", func(t *testing.T) {
if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil { if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil {
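
The hunks in this file drop redundant .(Gauge) type assertions: GaugeVec.With already returns a Gauge. A minimal sketch (metric and label names are illustrative):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        vec := prometheus.NewGaugeVec(
            prometheus.GaugeOpts{Name: "queue_length", Help: "Length of the queue."},
            []string{"l1", "l2"},
        )
        // With returns a Gauge, so Set can be called directly without an assertion.
        vec.With(prometheus.Labels{"l1": "v1", "l2": "v2"}).Set(42)
        fmt.Println(vec.Delete(prometheus.Labels{"l1": "v1", "l2": "v2"})) // true
    }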

@@ -20,8 +20,9 @@ import (
 	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
 )
 
 // WrapRegistererWith returns a Registerer wrapping the provided

@ -46,7 +46,6 @@ func toMetricFamilies(cs ...Collector) []*dto.MetricFamily {
} }
func TestWrap(t *testing.T) { func TestWrap(t *testing.T) {
simpleCnt := NewCounter(CounterOpts{ simpleCnt := NewCounter(CounterOpts{
Name: "simpleCnt", Name: "simpleCnt",
Help: "helpSimpleCnt", Help: "helpSimpleCnt",
@ -319,7 +318,6 @@ func TestWrap(t *testing.T) {
} }
}) })
} }
} }
func TestNil(t *testing.T) { func TestNil(t *testing.T) {

@ -0,0 +1,5 @@
// The following 2 methods always return nil as the error
(*github.com/cespare/xxhash/v2.Digest).Write
(*github.com/cespare/xxhash/v2.Digest).WriteString
(*bufio.Writer).WriteRune