Compare commits
20 commits: b71534a2b4 ... cc40069302
Author | SHA1
---|---
eduarrrd | cc40069302
PrometheusBot | 13851e9287
Ivan Goncharov | a934c35951
Matthieu MOREL | bab92a7743
PrometheusBot | 400ee29a10
Arthur Silva Sens | 2b11a4ba39
Matthieu MOREL | fcfad5c0b9
github-actions[bot] | 1aa11d0498
dependabot[bot] | abfb25769f
Ben Kochie | 6de54aaefd
Bartlomiej Plotka | 02883cbc82
Matt Harbison | 3c21cc0ecf
prombot | 8aea698d6a
Ivan Goncharov | 78d7a94e46
Arthur Silva Sens | 0c73c1c554
Ivan Goncharov | 6d099da971
Ivan Goncharov | 7877776aa6
Bartlomiej Plotka | d3f28401b9
Jan-Otto Kröpke | ece74f8b01
eduarrrd | 86e4b57547
@@ -46,11 +46,11 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
-       uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
+       uses: github/codeql-action/init@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.

@@ -61,7 +61,7 @@ jobs:
      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
-       uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
+       uses: github/codeql-action/autobuild@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

@@ -75,4 +75,4 @@ jobs:
      # make release

      - name: Perform CodeQL Analysis
-       uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
+       uses: github/codeql-action/analyze@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0
@@ -18,7 +18,7 @@ jobs:
    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
    steps:
      - name: git checkout
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Set docker hub repo name
        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
      - name: Push README to Dockerhub

@@ -40,7 +40,7 @@ jobs:
    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
    steps:
      - name: git checkout
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Set quay.io org name
        run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
      - name: Set quay.io repo name
@@ -23,9 +23,9 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Lint
-       uses: dagger/dagger-for-github@fc945fa66fc7bfa72bc80f85b1a1ef4bd1d30cbb # v6.11.0
+       uses: dagger/dagger-for-github@ad6a4e308a42fb2cf9be8b060f9aba9d57d4c9aa # v6.14.0
        with:
          version: "latest"
          verb: call
@@ -24,7 +24,7 @@ jobs:
      supported_versions: ${{ steps.matrix.outputs.supported_versions }}
    steps:
      - name: Checkout code
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Read supported_go_versions.txt
        id: matrix
        run: |

@@ -43,17 +43,17 @@ jobs:
    steps:
      - name: Checkout code
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Run tests and check license
-       uses: dagger/dagger-for-github@fc945fa66fc7bfa72bc80f85b1a1ef4bd1d30cbb # v6.11.0
+       uses: dagger/dagger-for-github@ad6a4e308a42fb2cf9be8b060f9aba9d57d4c9aa # v6.14.0
        with:
          version: "latest"
          verb: call
          args: -vvv --src . make --go-version ${{matrix.go_version}} --args 'check_license test'

      - name: Run style and unused
-       uses: dagger/dagger-for-github@fc945fa66fc7bfa72bc80f85b1a1ef4bd1d30cbb # v6.11.0
+       uses: dagger/dagger-for-github@ad6a4e308a42fb2cf9be8b060f9aba9d57d4c9aa # v6.14.0
        if: ${{ matrix.go_version == '1.21' }}
        with:
          version: "latest"
@@ -24,16 +24,16 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Install Go
-       uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+       uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
        with:
          go-version: 1.23.x
      - name: Install snmp_exporter/generator dependencies
        run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
        if: github.repository == 'prometheus/snmp_exporter'
      - name: Lint
-       uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
+       uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
        with:
          args: --verbose
-         version: v1.60.2
+         version: v1.61.0
@@ -13,7 +13,7 @@ jobs:
    steps:
      - name: Checkout repository
-       uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+       uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Execute bash script
        run: bash update-go-version.bash
@@ -25,11 +25,13 @@ linters:
    - ineffassign
    - misspell
    - nolintlint
    - perfsprint
    - predeclared
    - revive
    - staticcheck
    - unconvert
    - unused
    - usestdlibvars
    - wastedassign

issues:

@@ -66,6 +68,17 @@ linters-settings:
    local-prefixes: github.com/prometheus/client_golang
  gofumpt:
    extra-rules: true
+ perfsprint:
+   # Optimizes even if it requires an int or uint type cast.
+   int-conversion: true
+   # Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.
+   err-error: true
+   # Optimizes `fmt.Errorf`.
+   errorf: true
+   # Optimizes `fmt.Sprintf` with only one argument.
+   sprintf1: true
+   # Optimizes into strings concatenation.
+   strconcat: true
  revive:
    rules:
      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
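The `perfsprint` settings above drive most of the mechanical Go changes later in this compare: `fmt.Errorf` on constant strings, single-argument `fmt.Sprintf`, and `%s`-only formatting are replaced with `errors.New`, `strconv.Itoa`, and plain concatenation. A minimal before/after sketch of those rewrites, using strings taken from the diff below; the variable names are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	workerID := 7
	n := "gauge"

	// Before: fmt does the work even where no formatting verbs are needed.
	errBefore := fmt.Errorf("metric not unique")
	idBefore := fmt.Sprintf("%d", workerID)
	nameBefore := fmt.Sprintf("%s with abbreviated unit", n)

	// After: the cheaper stdlib forms that perfsprint asks for.
	errAfter := errors.New("metric not unique")
	idAfter := strconv.Itoa(workerID)
	nameAfter := n + " with abbreviated unit"

	fmt.Println(errBefore, idBefore, nameBefore)
	fmt.Println(errAfter, idAfter, nameAfter)
}
```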
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.60.2
+GOLANGCI_LINT_VERSION ?= v1.61.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -16,13 +16,13 @@ package v1

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"net/http/httptest"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"
@@ -260,7 +260,7 @@ func TestAPIs(t *testing.T) {
		},
		{
			do: doQuery("2", testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),

			reqMethod: "POST",
			reqPath: "/api/v1/query",

@@ -336,7 +336,7 @@ func TestAPIs(t *testing.T) {
			End: testTime,
			Step: 1 * time.Minute,
			}, WithTimeout(5*time.Second)),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),

			reqMethod: "POST",
			reqPath: "/api/v1/query_range",

@@ -361,14 +361,14 @@ func TestAPIs(t *testing.T) {
		{
			do: doLabelNames(nil, testTime.Add(-100*time.Hour), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "POST",
			reqPath: "/api/v1/labels",
			err: errors.New("some error"),
		},
		{
			do: doLabelNames(nil, testTime.Add(-100*time.Hour), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			inWarnings: []string{"a"},
			reqMethod: "POST",
			reqPath: "/api/v1/labels",

@@ -400,14 +400,14 @@ func TestAPIs(t *testing.T) {
		{
			do: doLabelValues(nil, "mylabel", testTime.Add(-100*time.Hour), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "GET",
			reqPath: "/api/v1/label/mylabel/values",
			err: errors.New("some error"),
		},
		{
			do: doLabelValues(nil, "mylabel", testTime.Add(-100*time.Hour), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			inWarnings: []string{"a"},
			reqMethod: "GET",
			reqPath: "/api/v1/label/mylabel/values",
@@ -464,7 +464,7 @@ func TestAPIs(t *testing.T) {
		{
			do: doSeries("up", testTime.Add(-time.Minute), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "POST",
			reqPath: "/api/v1/series",
			err: errors.New("some error"),

@@ -472,7 +472,7 @@ func TestAPIs(t *testing.T) {
		// Series with error and warning.
		{
			do: doSeries("up", testTime.Add(-time.Minute), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			inWarnings: []string{"a"},
			reqMethod: "POST",
			reqPath: "/api/v1/series",

@@ -493,7 +493,7 @@ func TestAPIs(t *testing.T) {
		{
			do: doSnapshot(true),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "POST",
			reqPath: "/api/v1/admin/tsdb/snapshot",
			err: errors.New("some error"),

@@ -507,7 +507,7 @@ func TestAPIs(t *testing.T) {
		{
			do: doCleanTombstones(),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "POST",
			reqPath: "/api/v1/admin/tsdb/clean_tombstones",
			err: errors.New("some error"),

@@ -528,7 +528,7 @@ func TestAPIs(t *testing.T) {
		{
			do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "POST",
			reqPath: "/api/v1/admin/tsdb/delete_series",
			err: errors.New("some error"),

@@ -550,8 +550,8 @@ func TestAPIs(t *testing.T) {
			do: doConfig(),
			reqMethod: "GET",
			reqPath: "/api/v1/status/config",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{
@@ -578,16 +578,16 @@ func TestAPIs(t *testing.T) {
			do: doFlags(),
			reqMethod: "GET",
			reqPath: "/api/v1/status/flags",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{
			do: doBuildinfo(),
			reqMethod: "GET",
			reqPath: "/api/v1/status/buildinfo",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -616,8 +616,8 @@ func TestAPIs(t *testing.T) {
			do: doRuntimeinfo(),
			reqMethod: "GET",
			reqPath: "/api/v1/status/runtimeinfo",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -684,8 +684,8 @@ func TestAPIs(t *testing.T) {
			do: doAlertManagers(),
			reqMethod: "GET",
			reqPath: "/api/v1/alertmanagers",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -891,8 +891,8 @@ func TestAPIs(t *testing.T) {
			do: doRules(),
			reqMethod: "GET",
			reqPath: "/api/v1/rules",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -971,8 +971,8 @@ func TestAPIs(t *testing.T) {
			do: doTargets(),
			reqMethod: "GET",
			reqPath: "/api/v1/targets",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -1005,7 +1005,7 @@ func TestAPIs(t *testing.T) {
		{
			do: doTargetsMetadata("{job=\"prometheus\"}", "go_goroutines", "1"),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "GET",
			reqPath: "/api/v1/targets/metadata",
			err: errors.New("some error"),

@@ -1037,7 +1037,7 @@ func TestAPIs(t *testing.T) {
		{
			do: doMetadata("", "1"),
-			inErr: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
			reqMethod: "GET",
			reqPath: "/api/v1/metadata",
			err: errors.New("some error"),

@@ -1047,8 +1047,8 @@ func TestAPIs(t *testing.T) {
			do: doTSDB(),
			reqMethod: "GET",
			reqPath: "/api/v1/status/tsdb",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -1127,8 +1127,8 @@ func TestAPIs(t *testing.T) {
			do: doWalReply(),
			reqMethod: "GET",
			reqPath: "/api/v1/status/walreplay",
-			inErr: fmt.Errorf("some error"),
-			err: fmt.Errorf("some error"),
+			inErr: errors.New("some error"),
+			err: errors.New("some error"),
		},

		{

@@ -1212,7 +1212,7 @@ func TestAPIs(t *testing.T) {
	tests = append(tests, queryTests...)

	for i, test := range tests {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
			tc.curTest = test

			res, warnings, err := test.do()

@@ -1430,7 +1430,7 @@ func TestAPIClientDo(t *testing.T) {
	}

	for i, test := range tests {
-		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
			tc.ch <- test

			_, body, warnings, err := client.Do(context.Background(), tc.req)
@@ -17,10 +17,10 @@
package main

import (
	"fmt"
	"log"
	"math/rand"
	"net/http"
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"

@@ -50,7 +50,7 @@ func main() {
		// Record fictional latency.
		now := time.Now()
		requestDurations.(prometheus.ExemplarObserver).ObserveWithExemplar(
-			time.Since(now).Seconds(), prometheus.Labels{"dummyID": fmt.Sprint(rand.Intn(100000))},
+			time.Since(now).Seconds(), prometheus.Labels{"dummyID": strconv.Itoa(rand.Intn(100000))},
		)
		time.Sleep(600 * time.Millisecond)
	}
@@ -18,11 +18,11 @@ package main

import (
	"flag"
	"fmt"
	"log"
	"math"
	"math/rand"
	"net/http"
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"

@@ -116,7 +116,7 @@ func main() {
			// the ExemplarObserver interface and thus don't need to
			// check the outcome of the type assertion.
			m.rpcDurationsHistogram.(prometheus.ExemplarObserver).ObserveWithExemplar(
-				v, prometheus.Labels{"dummyID": fmt.Sprint(rand.Intn(100000))},
+				v, prometheus.Labels{"dummyID": strconv.Itoa(rand.Intn(100000))},
			)
			time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
		}
@@ -0,0 +1,50 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"math"
	"sync/atomic"
	"time"
)

// atomicUpdateFloat atomically updates the float64 value pointed to by bits
// using the provided updateFunc, with an exponential backoff on contention.
func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) {
	const (
		// both numbers are derived from empirical observations
		// documented in this PR: https://github.com/prometheus/client_golang/pull/1661
		maxBackoff     = 320 * time.Millisecond
		initialBackoff = 10 * time.Millisecond
	)
	backoff := initialBackoff

	for {
		loadedBits := atomic.LoadUint64(bits)
		oldFloat := math.Float64frombits(loadedBits)
		newFloat := updateFunc(oldFloat)
		newBits := math.Float64bits(newFloat)

		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
			break
		} else {
			// Exponential backoff with sleep and cap to avoid infinite wait
			time.Sleep(backoff)
			backoff *= 2
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
	}
}
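The float64 lives in a uint64 because there is no atomic compare-and-swap on float64; callers changed later in this compare (counter, gauge, histogram, summary) all pass a closure that adds to the old value. A self-contained sketch of that calling pattern, with the plain CAS loop shown for contrast; the package name and sample values are illustrative only, and the exponential backoff the library adds on contention is deliberately omitted:

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// addFloat mirrors how the library's callers use atomicUpdateFloat: the float64
// is stored as its IEEE-754 bit pattern in a uint64 so it can be CASed.
func addFloat(bits *uint64, v float64) {
	for {
		old := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(old) + v)
		if atomic.CompareAndSwapUint64(bits, old, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64 // holds a float64 as raw bits; the zero value is 0.0
	addFloat(&sumBits, 2.5)
	addFloat(&sumBits, 4.0)
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // 6.5
}
```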
@@ -0,0 +1,167 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"math"
	"sync"
	"sync/atomic"
	"testing"
	"unsafe"
)

var output float64

func TestAtomicUpdateFloat(t *testing.T) {
	var val float64 = 0.0
	bits := (*uint64)(unsafe.Pointer(&val))
	var wg sync.WaitGroup
	numGoroutines := 100000
	increment := 1.0

	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomicUpdateFloat(bits, func(f float64) float64 {
				return f + increment
			})
		}()
	}

	wg.Wait()
	expected := float64(numGoroutines) * increment
	if val != expected {
		t.Errorf("Expected %f, got %f", expected, val)
	}
}

// Benchmark for atomicUpdateFloat with single goroutine (no contention).
func BenchmarkAtomicUpdateFloat_SingleGoroutine(b *testing.B) {
	var val float64 = 0.0
	bits := (*uint64)(unsafe.Pointer(&val))

	for i := 0; i < b.N; i++ {
		atomicUpdateFloat(bits, func(f float64) float64 {
			return f + 1.0
		})
	}

	output = val
}

// Benchmark for old implementation with single goroutine (no contention) -> to check overhead of backoff
func BenchmarkAtomicNoBackoff_SingleGoroutine(b *testing.B) {
	var val float64 = 0.0
	bits := (*uint64)(unsafe.Pointer(&val))

	for i := 0; i < b.N; i++ {
		for {
			loadedBits := atomic.LoadUint64(bits)
			newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0)
			if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
				break
			}
		}
	}

	output = val
}

// Benchmark varying the number of goroutines.
func benchmarkAtomicUpdateFloatConcurrency(b *testing.B, numGoroutines int) {
	var val float64 = 0.0
	bits := (*uint64)(unsafe.Pointer(&val))
	b.SetParallelism(numGoroutines)

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			atomicUpdateFloat(bits, func(f float64) float64 {
				return f + 1.0
			})
		}
	})

	output = val
}

func benchmarkAtomicNoBackoffFloatConcurrency(b *testing.B, numGoroutines int) {
	var val float64 = 0.0
	bits := (*uint64)(unsafe.Pointer(&val))
	b.SetParallelism(numGoroutines)

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for {
				loadedBits := atomic.LoadUint64(bits)
				newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0)
				if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
					break
				}
			}
		}
	})

	output = val
}

func BenchmarkAtomicUpdateFloat_1Goroutine(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 1)
}

func BenchmarkAtomicNoBackoff_1Goroutine(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 1)
}

func BenchmarkAtomicUpdateFloat_2Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 2)
}

func BenchmarkAtomicNoBackoff_2Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 2)
}

func BenchmarkAtomicUpdateFloat_4Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 4)
}

func BenchmarkAtomicNoBackoff_4Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 4)
}

func BenchmarkAtomicUpdateFloat_8Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 8)
}

func BenchmarkAtomicNoBackoff_8Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 8)
}

func BenchmarkAtomicUpdateFloat_16Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 16)
}

func BenchmarkAtomicNoBackoff_16Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 16)
}

func BenchmarkAtomicUpdateFloat_32Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 32)
}

func BenchmarkAtomicNoBackoff_32Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 32)
}
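To reproduce the contention numbers the backoff constants refer to, the paired benchmarks above can be run with the standard tooling, e.g. `go test -run '^$' -bench 'Atomic' -benchmem ./prometheus/`; the package path is an assumption here, so adjust it to wherever these files live in your checkout.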
@@ -134,13 +134,9 @@ func (c *counter) Add(v float64) {
		return
	}

-	for {
-		oldBits := atomic.LoadUint64(&c.valBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
-			return
-		}
-	}
+	atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
}

func (c *counter) AddWithExemplar(v float64, e Labels) {
@@ -14,7 +14,6 @@
package prometheus

import (
-	"fmt"
	"math"
	"strings"
	"testing"

@@ -120,10 +119,10 @@ func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {
		expectPanic(t, func() {
			counterVec.WithLabelValues(labelValues...)
-		}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+		}, "WithLabelValues: expected panic because: "+test.desc)
		expectPanic(t, func() {
			counterVec.With(test.labels)
-		}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+		}, "WithLabelValues: expected panic because: "+test.desc)

		if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {
			t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc)
@@ -120,13 +120,9 @@ func (g *gauge) Dec() {
}

func (g *gauge) Add(val float64) {
-	for {
-		oldBits := atomic.LoadUint64(&g.valBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
-		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
-			return
-		}
-	}
+	atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 {
+		return oldVal + val
+	})
}

func (g *gauge) Sub(val float64) {
@@ -858,15 +858,35 @@ func (h *histogram) Write(out *dto.Metric) error {
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
-	// slightly faster than the binary search. If we really care, we could
-	// switch from one search strategy to the other depending on the number
-	// of buckets.
-	//
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small arrays, use simple linear search
+	// "magic number" 35 is result of tests on couple different (AWS and baremetal) servers
+	// see more details here: https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
	return sort.SearchFloat64s(h.upperBounds, v)
}
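Both branches compute the same index: `sort.SearchFloat64s` returns the smallest index whose bound is >= v, which is exactly the `v <= bound` condition used by the linear path. A standalone sketch of that equivalence, reusing the bounds from the `TestFindBucket` cases added further down; this is an illustration only, not the library's exported API:

```go
package main

import (
	"fmt"
	"sort"
)

// linearFindBucket mirrors the small-slice path: the first bucket whose
// upper bound is >= v, or len(bounds) for the +Inf bucket.
func linearFindBucket(bounds []float64, v float64) int {
	for i, b := range bounds {
		if v <= b {
			return i
		}
	}
	return len(bounds)
}

func main() {
	bounds := []float64{1, 2, 3, 4, 5} // same bounds as TestFindBucket's small histogram
	for _, v := range []float64{-1, 0.5, 2.5, 5.5} {
		linear := linearFindBucket(bounds, v)
		binary := sort.SearchFloat64s(bounds, v) // smallest i with bounds[i] >= v
		fmt.Println(v, linear, binary)           // -1 0 0, 0.5 0 0, 2.5 2 2, 5.5 5 5
	}
}
```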
@@ -1621,13 +1641,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
// atomicAddFloat adds the provided float atomically to another float
// represented by the bit pattern the bits pointer is pointing to.
func atomicAddFloat(bits *uint64, v float64) {
-	for {
-		loadedBits := atomic.LoadUint64(bits)
-		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
-		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
-			break
-		}
-	}
+	atomicUpdateFloat(bits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
}

// atomicDecUint32 atomically decrements the uint32 p points to. See
@@ -1455,3 +1455,91 @@ func compareNativeExemplarValues(t *testing.T, exps []*dto.Exemplar, values []fl
		}
	}
}

var resultFindBucket int

func benchmarkFindBucket(b *testing.B, l int) {
	h := &histogram{upperBounds: make([]float64, l)}
	for i := range h.upperBounds {
		h.upperBounds[i] = float64(i)
	}
	v := float64(l / 2)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		resultFindBucket = h.findBucket(v)
	}
}

func BenchmarkFindBucketShort(b *testing.B) {
	benchmarkFindBucket(b, 20)
}

func BenchmarkFindBucketMid(b *testing.B) {
	benchmarkFindBucket(b, 40)
}

func BenchmarkFindBucketLarge(b *testing.B) {
	benchmarkFindBucket(b, 100)
}

func BenchmarkFindBucketHuge(b *testing.B) {
	benchmarkFindBucket(b, 500)
}

func BenchmarkFindBucketInf(b *testing.B) {
	h := &histogram{upperBounds: make([]float64, 500)}
	for i := range h.upperBounds {
		h.upperBounds[i] = float64(i)
	}
	v := 1000.5

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		resultFindBucket = h.findBucket(v)
	}
}

func BenchmarkFindBucketLow(b *testing.B) {
	h := &histogram{upperBounds: make([]float64, 500)}
	for i := range h.upperBounds {
		h.upperBounds[i] = float64(i)
	}
	v := -1.1

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		resultFindBucket = h.findBucket(v)
	}
}

func TestFindBucket(t *testing.T) {
	smallHistogram := &histogram{upperBounds: []float64{1, 2, 3, 4, 5}}
	largeHistogram := &histogram{upperBounds: make([]float64, 50)}
	for i := range largeHistogram.upperBounds {
		largeHistogram.upperBounds[i] = float64(i)
	}

	tests := []struct {
		h        *histogram
		v        float64
		expected int
	}{
		{smallHistogram, -1, 0},
		{smallHistogram, 0.5, 0},
		{smallHistogram, 2.5, 2},
		{smallHistogram, 5.5, 5},
		{largeHistogram, -1, 0},
		{largeHistogram, 25.5, 26},
		{largeHistogram, 49.5, 50},
		{largeHistogram, 50.5, 50},
		{largeHistogram, 5000.5, 50},
	}

	for _, tt := range tests {
		result := tt.h.findBucket(tt.v)
		if result != tt.expected {
			t.Errorf("findBucket(%v) = %d; expected %d", tt.v, result, tt.expected)
		}
	}
}
@@ -22,6 +22,7 @@ import (
	"bytes"
	"fmt"
	"io"
+	"strconv"
	"strings"
)

@@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
	beginning := start + 1 // lines start numbering with one
	length := stop - start
	if length == 1 {
-		return fmt.Sprintf("%d", beginning)
+		return strconv.Itoa(beginning)
	}
	if length == 0 {
		beginning-- // empty ranges begin at line just before the range
@@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
-	switch {
-	case namespace != "" && subsystem != "":
-		return strings.Join([]string{namespace, subsystem, name}, "_")
-	case namespace != "":
-		return strings.Join([]string{namespace, name}, "_")
-	case subsystem != "":
-		return strings.Join([]string{subsystem, name}, "_")
-	}
-	return name
+
+	sb := strings.Builder{}
+	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+	if namespace != "" {
+		sb.WriteString(namespace)
+		sb.WriteString("_")
+	}
+
+	if subsystem != "" {
+		sb.WriteString(subsystem)
+		sb.WriteString("_")
+	}
+
+	sb.WriteString(name)
+
+	return sb.String()
}

type invalidMetric struct {
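The rewrite only changes how the string is assembled; the output of `BuildFQName` is the same for every combination of empty and non-empty parts. For reference, what it produces (the namespace/subsystem/name strings below are placeholders, results shown in the comments):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("ns", "sub", "requests_total")) // ns_sub_requests_total
	fmt.Println(prometheus.BuildFQName("ns", "", "requests_total"))    // ns_requests_total
	fmt.Println(prometheus.BuildFQName("", "sub", "requests_total"))   // sub_requests_total
	fmt.Println(prometheus.BuildFQName("", "", "requests_total"))      // requests_total
	fmt.Println(prometheus.BuildFQName("ns", "sub", ""))               // "" (empty name short-circuits)
}
```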
@@ -15,12 +15,16 @@
#include <mach/mach_init.h>
#include <mach/task.h>
-// Compiler warns that shared_memory_server.h is deprecated, use this instead.
-// But this doesn't define SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE.
-//#include <mach/shared_region.h>
-#include <mach/shared_memory_server.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead. But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
+#define SHARED_DATA_REGION_SIZE 0x10000000
+#define SHARED_TEXT_REGION_SIZE 0x10000000

int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
{

@@ -22,9 +22,7 @@ import "C"
import "fmt"

func getMemory() (*memoryInfo, error) {
-	var (
-		rss, vsize C.ulonglong
-	)
+	var rss, vsize C.ulonglong

	if err := C.get_memory_info(&rss, &vsize); err != 0 {
		return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
@@ -98,7 +98,7 @@ func readCompressedBody(r io.Reader, comp Compression) (string, error) {
		got, err := io.ReadAll(reader)
		return string(got), err
	}
-	return "", fmt.Errorf("Unsupported compression")
+	return "", errors.New("Unsupported compression")
}

func TestHandlerErrorHandling(t *testing.T) {

@@ -131,7 +131,7 @@ func TestHandlerErrorHandling(t *testing.T) {
	logger := log.New(logBuf, "", 0)

	writer := httptest.NewRecorder()
-	request, _ := http.NewRequest("GET", "/", nil)
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	request.Header.Add("Accept", "test/plain")

	mReg := &mockTransactionGatherer{g: reg}

@@ -252,7 +252,7 @@ func TestInstrumentMetricHandler(t *testing.T) {
	// Do it again to test idempotency.
	InstrumentMetricHandler(reg, HandlerForTransactional(mReg, HandlerOpts{}))
	writer := httptest.NewRecorder()
-	request, _ := http.NewRequest("GET", "/", nil)
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	request.Header.Add(acceptHeader, acceptTextPlain)

	handler.ServeHTTP(writer, request)

@@ -311,7 +311,7 @@ func TestHandlerMaxRequestsInFlight(t *testing.T) {
	w1 := httptest.NewRecorder()
	w2 := httptest.NewRecorder()
	w3 := httptest.NewRecorder()
-	request, _ := http.NewRequest("GET", "/", nil)
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	request.Header.Add(acceptHeader, acceptTextPlain)

	c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}

@@ -348,7 +348,7 @@ func TestHandlerTimeout(t *testing.T) {
	handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond})
	w := httptest.NewRecorder()

-	request, _ := http.NewRequest("GET", "/", nil)
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	request.Header.Add("Accept", "test/plain")

	c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}

@@ -372,7 +372,7 @@ func TestInstrumentMetricHandlerWithCompression(t *testing.T) {
	handler := InstrumentMetricHandler(reg, HandlerForTransactional(mReg, HandlerOpts{DisableCompression: false}))
	compression := Zstd
	writer := httptest.NewRecorder()
-	request, _ := http.NewRequest("GET", "/", nil)
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	request.Header.Add(acceptHeader, acceptTextPlain)
	request.Header.Add(acceptEncodingHeader, string(compression))

@@ -533,7 +533,7 @@ func TestNegotiateEncodingWriter(t *testing.T) {
	}

	for _, test := range testCases {
-		request, _ := http.NewRequest("GET", "/", nil)
+		request, _ := http.NewRequest(http.MethodGet, "/", nil)
		request.Header.Add(acceptEncodingHeader, test.acceptEncoding)
		rr := httptest.NewRecorder()
		_, encodingHeader, _, err := negotiateEncodingWriter(request, rr, test.offeredCompressions)

@@ -631,7 +631,7 @@ func BenchmarkCompression(b *testing.B) {
		b.Run(benchmark.name+"_"+size.name, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				writer := httptest.NewRecorder()
-				request, _ := http.NewRequest("GET", "/", nil)
+				request, _ := http.NewRequest(http.MethodGet, "/", nil)
				request.Header.Add(acceptEncodingHeader, benchmark.compressionType)
				handler.ServeHTTP(writer, request)
			}
@@ -223,7 +223,7 @@ func TestClientMiddlewareAPI_WithRequestContext(t *testing.T) {
	}))
	defer backend.Close()

-	req, err := http.NewRequest("GET", backend.URL, nil)
+	req, err := http.NewRequest(http.MethodGet, backend.URL, nil)
	if err != nil {
		t.Fatalf("%v", err)
	}

@@ -276,7 +276,7 @@ func TestClientMiddlewareAPIWithRequestContextTimeout(t *testing.T) {
	}))
	defer backend.Close()

-	req, err := http.NewRequest("GET", backend.URL, nil)
+	req, err := http.NewRequest(http.MethodGet, backend.URL, nil)
	if err != nil {
		t.Fatalf("%v", err)
	}

@@ -418,7 +418,7 @@ func TestMiddlewareAPI(t *testing.T) {
		_, _ = w.Write([]byte("OK"))
	})

-	r, _ := http.NewRequest("GET", "www.example.com", nil)
+	r, _ := http.NewRequest(http.MethodGet, "www.example.com", nil)
	w := httptest.NewRecorder()
	chain.ServeHTTP(w, r)

@@ -432,7 +432,7 @@ func TestMiddlewareAPI_WithExemplars(t *testing.T) {
		_, _ = w.Write([]byte("OK"))
	}, WithExemplarFromContext(func(_ context.Context) prometheus.Labels { return exemplar }))

-	r, _ := http.NewRequest("GET", "www.example.com", nil)
+	r, _ := http.NewRequest(http.MethodGet, "www.example.com", nil)
	w := httptest.NewRecorder()
	chain.ServeHTTP(w, r)
@@ -16,10 +16,10 @@
// by using its methods, finally calling Add or Push, like this:
//
//	// Easy case:
-//	push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push()
+//	push.New("http://example.org/", "my_job").Gatherer(myRegistry).Push()
//
//	// Complex case:
-//	push.New("http://example.org/metrics", "my_job").
+//	push.New("http://example.org/", "my_job").
//		Collector(myCollector1).
//		Collector(myCollector2).
//		Grouping("zone", "xy").
@@ -27,6 +27,7 @@ import (
	"net/http"
	"net/http/httptest"
	"os"
+	"strconv"
	"sync"
	"testing"
	"time"

@@ -713,7 +714,7 @@ collected metric "broken_metric" { label:<name:"foo" value:"bar" > label:<name:"
	}
	writer := httptest.NewRecorder()
	handler := promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})
-	request, _ := http.NewRequest("GET", "/", nil)
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	for key, value := range scenario.headers {
		request.Header.Add(key, value)
	}

@@ -1282,7 +1283,7 @@ func ExampleRegistry_grouping() {
			ConstLabels: prometheus.Labels{
				// Generate a label unique to this worker so its metric doesn't
				// collide with the metrics from other workers.
-				"worker_id": fmt.Sprintf("%d", workerID),
+				"worker_id": strconv.Itoa(workerID),
			},
		})
		workerReg.MustRegister(workTime)
@@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	s := &summary{
		desc: desc,
+		now: opts.now,

		objectives: opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

@@ -280,6 +281,8 @@ type summary struct {
	desc *Desc

+	now func() time.Time
+
	objectives map[float64]float64
	sortedObjectives []float64

@@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

-	now := time.Now()
+	now := s.now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}

@@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
-	s.swapBufs(time.Now())
+	s.swapBufs(s.now())
	s.bufMtx.Unlock()

	s.flushColdBuf()

@@ -468,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
	n := atomic.AddUint64(&s.countAndHotIdx, 1)
	hotCounts := s.counts[n>>63]

-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
-		}
-	}
+	atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hotCounts.count, 1)

@@ -516,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
	// Finally add all the cold counts to the new hot counts and reset the cold counts.
	atomic.AddUint64(&hotCounts.count, count)
	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
+
+	// Use atomicUpdateFloat to update hotCounts.sumBits atomically.
+	atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
+		return oldVal + sum.GetSampleSum()
+	})
+	atomic.StoreUint64(&coldCounts.sumBits, 0)

	return nil
}
@@ -371,10 +371,7 @@ func TestSummaryVecConcurrency(t *testing.T) {
}

func TestSummaryDecay(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping test in short mode.")
-		// More because it depends on timing than because it is particularly long...
-	}
+	now := time.Now()

	sum := NewSummary(SummaryOpts{
		Name: "test_summary",

@@ -382,28 +379,28 @@ func TestSummaryDecay(t *testing.T) {
		MaxAge: 100 * time.Millisecond,
		Objectives: map[float64]float64{0.1: 0.001},
		AgeBuckets: 10,
+		now: func() time.Time {
+			return now
+		},
	})

	m := &dto.Metric{}
-	i := 0
-	tick := time.NewTicker(time.Millisecond)
-	for range tick.C {
-		i++
+	for i := 1; i <= 1000; i++ {
+		now = now.Add(time.Millisecond)
		sum.Observe(float64(i))
		if i%10 == 0 {
			sum.Write(m)
-			if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
+			got := *m.Summary.Quantile[0].Value
+			want := math.Max(float64(i)/10, float64(i-90))
+			if math.Abs(got-want) > 20 {
				t.Errorf("%d. got %f, want %f", i, got, want)
			}
			m.Reset()
		}
-		if i >= 1000 {
-			break
-		}
	}
-	tick.Stop()
-	// Wait for MaxAge without observations and make sure quantiles are NaN.
-	time.Sleep(100 * time.Millisecond)
+
+	// Simulate waiting for MaxAge without observations
+	now = now.Add(100 * time.Millisecond)
	sum.Write(m)
	if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
		t.Errorf("got %f, want NaN after expiration", got)
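The decay test now drives an injected clock instead of sleeping on a real ticker, which makes it deterministic and fast. The `now` field is unexported, so outside the library the same effect needs your own seam; a minimal sketch of that pattern follows (the type and field names are illustrative, not part of the client_golang API):

```go
package main

import (
	"fmt"
	"time"
)

// expiringValue forgets its last observation after maxAge, measured against an
// injectable clock -- the same seam the summary test above uses via the
// unexported SummaryOpts now field.
type expiringValue struct {
	now    func() time.Time // swap in a fake clock in tests
	maxAge time.Duration
	last   time.Time
}

func (e *expiringValue) observe()      { e.last = e.now() }
func (e *expiringValue) expired() bool { return e.now().Sub(e.last) > e.maxAge }

func main() {
	now := time.Now()
	e := &expiringValue{now: func() time.Time { return now }, maxAge: 100 * time.Millisecond}
	e.observe()
	now = now.Add(200 * time.Millisecond) // advance the fake clock; no time.Sleep needed
	fmt.Println(e.expired())              // true
}
```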
@@ -14,6 +14,7 @@
package promlint_test

import (
+	"errors"
	"fmt"
	"reflect"
	"strings"

@@ -733,7 +734,7 @@ request_duration_seconds{httpService="foo"} 10
func TestLintUnitAbbreviations(t *testing.T) {
	genTest := func(n string) test {
		return test{
-			name: fmt.Sprintf("%s with abbreviated unit", n),
+			name: n + " with abbreviated unit",
			in: fmt.Sprintf(`
# HELP %s Test metric.
# TYPE %s gauge

@@ -820,7 +821,7 @@ mc_something_total 10
	prefixValidation := func(mf *dto.MetricFamily) []error {
		if !strings.HasPrefix(mf.GetName(), "memcached_") {
-			return []error{fmt.Errorf("expected metric name to start with 'memcached_'")}
+			return []error{errors.New("expected metric name to start with 'memcached_'")}
		}
		return nil
	}
@@ -14,7 +14,7 @@
package validations

import (
-	"fmt"
+	"errors"
	"reflect"

	dto "github.com/prometheus/client_model/go"

@@ -27,7 +27,7 @@ func LintDuplicateMetric(mf *dto.MetricFamily) []error {
	for i, m := range mf.Metric {
		for _, k := range mf.Metric[i+1:] {
			if reflect.DeepEqual(m.Label, k.Label) {
-				problems = append(problems, fmt.Errorf("metric not unique"))
+				problems = append(problems, errors.New("metric not unique"))
				break
			}
		}
@@ -14,7 +14,6 @@
package prometheus

import (
-	"fmt"
	"testing"
	"time"

@@ -51,7 +50,7 @@ func TestNewConstMetricInvalidLabelValues(t *testing.T) {
	expectPanic(t, func() {
		MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF")
-	}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+	}, "WithLabelValues: expected panic because: "+test.desc)

	if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil {
		t.Errorf("NewConstMetric: expected error because: %s", test.desc)
@@ -16,6 +16,7 @@ package prometheus
import (
	"fmt"
	"reflect"
+	"strconv"
	"testing"

	dto "github.com/prometheus/client_model/go"

@@ -291,7 +292,7 @@ func testMetricVec(t *testing.T, vec *GaugeVec) {
	expected := map[[2]string]int{}

	for i := 0; i < 1000; i++ {
-		pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples.
+		pair[0], pair[1] = strconv.Itoa(i%4), strconv.Itoa(i%5) // Varying combinations multiples.
		expected[pair]++
		vec.WithLabelValues(pair[0], pair[1]).Inc()

@@ -363,7 +364,7 @@ func testConstrainedMetricVec(t *testing.T, vec *GaugeVec, constrain func(string
	expected := map[[2]string]int{}

	for i := 0; i < 1000; i++ {
-		pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples.
+		pair[0], pair[1] = strconv.Itoa(i%4), strconv.Itoa(i%5) // Varying combinations multiples.
		expected[[2]string{pair[0], constrain(pair[1])}]++
		vec.WithLabelValues(pair[0], pair[1]).Inc()