Compare commits

...

16 Commits

Author SHA1 Message Date
Owen Williams ab35ac0ae5
Merge 6514f6eb91 into 13851e9287 2024-11-13 14:51:43 -05:00
PrometheusBot 13851e9287
Update common Prometheus files (#1683)
Signed-off-by: prombot <prometheus-team@googlegroups.com>
2024-11-12 20:02:43 +00:00
Ivan Goncharov a934c35951
Add: exponential backoff for CAS operations on floats (#1661)
* add: exponential backoff for CAS operations of floats

Signed-off-by: Ivan Goncharov <i.morph@gmail.com>

* add: some more benchmark use cases (higher contention)

Signed-off-by: Ivan Goncharov <i.morph@gmail.com>

* fmt: ran gofumpt on some files

Signed-off-by: Ivan Goncharov <i.morph@gmail.com>

* add: license header

Signed-off-by: Ivan Goncharov <i.morph@gmail.com>

* add: comment explaining origin of backoff constants

Signed-off-by: Ivan Goncharov <i.morph@gmail.com>

---------

Signed-off-by: Ivan Goncharov <i.morph@gmail.com>
2024-11-11 16:26:17 +00:00
Matthieu MOREL bab92a7743
chore: enable usestdlibvars linter (#1680)
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
2024-11-11 15:19:33 +01:00
PrometheusBot 400ee29a10
Update common Prometheus files (#1679)
Signed-off-by: prombot <prometheus-team@googlegroups.com>
2024-11-11 15:09:29 +01:00
Arthur Silva Sens 2b11a4ba39
Merge pull request #1673 from imorph/faster_find_bucket
PERF: faster algorithm to discover the bucket of a histogram observation
2024-11-08 07:22:56 -03:00
Matthieu MOREL fcfad5c0b9
[chore]: enable perfsprint linter (#1676) 2024-11-08 09:54:31 +01:00
github-actions[bot] 1aa11d0498
Merge pull request #1678 from prometheus/dependabot/github_actions/github-actions-99b3cd78cd
build(deps): bump the github-actions group across 1 directory with 3 updates
2024-11-08 08:49:44 +00:00
dependabot[bot] abfb25769f
build(deps): bump the github-actions group across 1 directory with 3 updates
Bumps the github-actions group with 3 updates in the / directory: [actions/checkout](https://github.com/actions/checkout), [github/codeql-action](https://github.com/github/codeql-action) and [dagger/dagger-for-github](https://github.com/dagger/dagger-for-github).


Updates `actions/checkout` from 4.2.0 to 4.2.2
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](d632683dd7...11bd71901b)

Updates `github/codeql-action` from 3.26.10 to 3.27.0
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](e2b3eafc8d...662472033e)

Updates `dagger/dagger-for-github` from 6.11.0 to 6.14.0
- [Release notes](https://github.com/dagger/dagger-for-github/releases)
- [Commits](fc945fa66f...ad6a4e308a)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: github-actions
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: github-actions
- dependency-name: dagger/dagger-for-github
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: github-actions
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-08 08:42:37 +00:00
Ben Kochie 6de54aaefd
Merge pull request #1674 from prometheus/repo_sync
Synchronize common files from prometheus/prometheus
2024-11-08 08:41:25 +00:00
Bartlomiej Plotka 02883cbc82
Merge pull request #1675 from mharbison72/issue-1660
process_collector: avoid a compiler warning on macOS (fixes #1660)
2024-11-08 00:52:32 +01:00
Matt Harbison 3c21cc0ecf process_collector: avoid a compiler warning on macOS (fixes #1660)
The header has a warning when included, with no way to shut it off, and no
alternative way to obtain these symbols. They're technically architecture-specific
values, but they don't differ between amd64 and arm64, so the definitions are
combined.

Signed-off-by: Matt Harbison <mharbison72@gmail.com>
2024-11-07 17:22:22 -05:00
prombot 8aea698d6a Update common Prometheus files
Signed-off-by: prombot <prometheus-team@googlegroups.com>
2024-11-05 17:47:44 +00:00
Ivan Goncharov 78d7a94e46 add: linear search implementation (+ benchmarks)
Signed-off-by: Ivan Goncharov <i.morph@gmail.com>
2024-11-04 20:26:59 +01:00
Owen Williams 6514f6eb91 move func to gatherer
Signed-off-by: Owen Williams <owen.williams@grafana.com>
2024-10-17 13:55:29 -04:00
Owen Williams 42825b62f4 Return an http error during scraping if metrics collide when escaped to underscores
Signed-off-by: Owen Williams <owen.williams@grafana.com>
2024-10-17 11:20:00 -04:00
33 changed files with 825 additions and 149 deletions

View File

@ -46,11 +46,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
uses: github/codeql-action/init@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@ -61,7 +61,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
uses: github/codeql-action/autobuild@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@ -75,4 +75,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
uses: github/codeql-action/analyze@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0

View File

@ -18,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@ -40,7 +40,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name

View File

@ -23,9 +23,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Lint
uses: dagger/dagger-for-github@fc945fa66fc7bfa72bc80f85b1a1ef4bd1d30cbb # v6.11.0
uses: dagger/dagger-for-github@ad6a4e308a42fb2cf9be8b060f9aba9d57d4c9aa # v6.14.0
with:
version: "latest"
verb: call

View File

@ -24,7 +24,7 @@ jobs:
supported_versions: ${{ steps.matrix.outputs.supported_versions }}
steps:
- name: Checkout code
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Read supported_go_versions.txt
id: matrix
run: |
@ -43,17 +43,17 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Run tests and check license
uses: dagger/dagger-for-github@fc945fa66fc7bfa72bc80f85b1a1ef4bd1d30cbb # v6.11.0
uses: dagger/dagger-for-github@ad6a4e308a42fb2cf9be8b060f9aba9d57d4c9aa # v6.14.0
with:
version: "latest"
verb: call
args: -vvv --src . make --go-version ${{matrix.go_version}} --args 'check_license test'
- name: Run style and unused
uses: dagger/dagger-for-github@fc945fa66fc7bfa72bc80f85b1a1ef4bd1d30cbb # v6.11.0
uses: dagger/dagger-for-github@ad6a4e308a42fb2cf9be8b060f9aba9d57d4c9aa # v6.14.0
if: ${{ matrix.go_version == '1.21' }}
with:
version: "latest"

View File

@ -24,16 +24,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
with:
go-version: 1.23.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
with:
args: --verbose
version: v1.60.2
version: v1.61.0

View File

@ -13,7 +13,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Execute bash script
run: bash update-go-version.bash

View File

@ -25,11 +25,13 @@ linters:
- ineffassign
- misspell
- nolintlint
- perfsprint
- predeclared
- revive
- staticcheck
- unconvert
- unused
- usestdlibvars
- wastedassign
issues:
@ -66,6 +68,17 @@ linters-settings:
local-prefixes: github.com/prometheus/client_golang
gofumpt:
extra-rules: true
perfsprint:
# Optimizes even if it requires an int or uint type cast.
int-conversion: true
# Optimizes into `err.Error()` even if it is only equivalent for non-nil errors.
err-error: true
# Optimizes `fmt.Errorf`.
errorf: true
# Optimizes `fmt.Sprintf` with only one argument.
sprintf1: true
# Optimizes into strings concatenation.
strconcat: true
revive:
rules:
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter
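
For reference, this is the kind of rewrite the two new linters drive throughout the rest of this compare: usestdlibvars swaps string literals for the matching stdlib constants, and perfsprint replaces trivial fmt calls with cheaper stdlib equivalents. A minimal compilable sketch (the function and values are illustrative, not taken from the diff):

package lintdemo

import (
	"errors"
	"net/http"
	"strconv"
)

// Before the linters this would have used http.NewRequest("GET", ...),
// fmt.Sprintf("%d", i), and fmt.Errorf("request failed"); after, it uses
// the stdlib equivalents directly.
func buildRequest(i int) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, "/work/"+strconv.Itoa(i), nil) // usestdlibvars + perfsprint sprintf1/strconcat
	if err != nil {
		return nil, errors.New("request failed") // perfsprint errorf
	}
	return req, nil
}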

View File

@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.60.2
GOLANGCI_LINT_VERSION ?= v1.61.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

View File

@ -16,13 +16,13 @@ package v1
import (
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"strconv"
"strings"
"testing"
"time"
@ -260,7 +260,7 @@ func TestAPIs(t *testing.T) {
},
{
do: doQuery("2", testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/query",
@ -336,7 +336,7 @@ func TestAPIs(t *testing.T) {
End: testTime,
Step: 1 * time.Minute,
}, WithTimeout(5*time.Second)),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/query_range",
@ -361,14 +361,14 @@ func TestAPIs(t *testing.T) {
{
do: doLabelNames(nil, testTime.Add(-100*time.Hour), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/labels",
err: errors.New("some error"),
},
{
do: doLabelNames(nil, testTime.Add(-100*time.Hour), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
inWarnings: []string{"a"},
reqMethod: "POST",
reqPath: "/api/v1/labels",
@ -400,14 +400,14 @@ func TestAPIs(t *testing.T) {
{
do: doLabelValues(nil, "mylabel", testTime.Add(-100*time.Hour), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "GET",
reqPath: "/api/v1/label/mylabel/values",
err: errors.New("some error"),
},
{
do: doLabelValues(nil, "mylabel", testTime.Add(-100*time.Hour), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
inWarnings: []string{"a"},
reqMethod: "GET",
reqPath: "/api/v1/label/mylabel/values",
@ -464,7 +464,7 @@ func TestAPIs(t *testing.T) {
{
do: doSeries("up", testTime.Add(-time.Minute), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/series",
err: errors.New("some error"),
@ -472,7 +472,7 @@ func TestAPIs(t *testing.T) {
// Series with error and warning.
{
do: doSeries("up", testTime.Add(-time.Minute), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
inWarnings: []string{"a"},
reqMethod: "POST",
reqPath: "/api/v1/series",
@ -493,7 +493,7 @@ func TestAPIs(t *testing.T) {
{
do: doSnapshot(true),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/snapshot",
err: errors.New("some error"),
@ -507,7 +507,7 @@ func TestAPIs(t *testing.T) {
{
do: doCleanTombstones(),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/clean_tombstones",
err: errors.New("some error"),
@ -528,7 +528,7 @@ func TestAPIs(t *testing.T) {
{
do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/delete_series",
err: errors.New("some error"),
@ -550,8 +550,8 @@ func TestAPIs(t *testing.T) {
do: doConfig(),
reqMethod: "GET",
reqPath: "/api/v1/status/config",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -578,16 +578,16 @@ func TestAPIs(t *testing.T) {
do: doFlags(),
reqMethod: "GET",
reqPath: "/api/v1/status/flags",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
do: doBuildinfo(),
reqMethod: "GET",
reqPath: "/api/v1/status/buildinfo",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -616,8 +616,8 @@ func TestAPIs(t *testing.T) {
do: doRuntimeinfo(),
reqMethod: "GET",
reqPath: "/api/v1/status/runtimeinfo",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -684,8 +684,8 @@ func TestAPIs(t *testing.T) {
do: doAlertManagers(),
reqMethod: "GET",
reqPath: "/api/v1/alertmanagers",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -891,8 +891,8 @@ func TestAPIs(t *testing.T) {
do: doRules(),
reqMethod: "GET",
reqPath: "/api/v1/rules",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -971,8 +971,8 @@ func TestAPIs(t *testing.T) {
do: doTargets(),
reqMethod: "GET",
reqPath: "/api/v1/targets",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -1005,7 +1005,7 @@ func TestAPIs(t *testing.T) {
{
do: doTargetsMetadata("{job=\"prometheus\"}", "go_goroutines", "1"),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "GET",
reqPath: "/api/v1/targets/metadata",
err: errors.New("some error"),
@ -1037,7 +1037,7 @@ func TestAPIs(t *testing.T) {
{
do: doMetadata("", "1"),
inErr: fmt.Errorf("some error"),
inErr: errors.New("some error"),
reqMethod: "GET",
reqPath: "/api/v1/metadata",
err: errors.New("some error"),
@ -1047,8 +1047,8 @@ func TestAPIs(t *testing.T) {
do: doTSDB(),
reqMethod: "GET",
reqPath: "/api/v1/status/tsdb",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -1127,8 +1127,8 @@ func TestAPIs(t *testing.T) {
do: doWalReply(),
reqMethod: "GET",
reqPath: "/api/v1/status/walreplay",
inErr: fmt.Errorf("some error"),
err: fmt.Errorf("some error"),
inErr: errors.New("some error"),
err: errors.New("some error"),
},
{
@ -1212,7 +1212,7 @@ func TestAPIs(t *testing.T) {
tests = append(tests, queryTests...)
for i, test := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
tc.curTest = test
res, warnings, err := test.do()
@ -1430,7 +1430,7 @@ func TestAPIClientDo(t *testing.T) {
}
for i, test := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
tc.ch <- test
_, body, warnings, err := client.Do(context.Background(), tc.req)

View File

@ -17,10 +17,10 @@
package main
import (
"fmt"
"log"
"math/rand"
"net/http"
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -50,7 +50,7 @@ func main() {
// Record fictional latency.
now := time.Now()
requestDurations.(prometheus.ExemplarObserver).ObserveWithExemplar(
time.Since(now).Seconds(), prometheus.Labels{"dummyID": fmt.Sprint(rand.Intn(100000))},
time.Since(now).Seconds(), prometheus.Labels{"dummyID": strconv.Itoa(rand.Intn(100000))},
)
time.Sleep(600 * time.Millisecond)
}

View File

@ -18,11 +18,11 @@ package main
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"net/http"
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -116,7 +116,7 @@ func main() {
// the ExemplarObserver interface and thus don't need to
// check the outcome of the type assertion.
m.rpcDurationsHistogram.(prometheus.ExemplarObserver).ObserveWithExemplar(
v, prometheus.Labels{"dummyID": fmt.Sprint(rand.Intn(100000))},
v, prometheus.Labels{"dummyID": strconv.Itoa(rand.Intn(100000))},
)
time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
}

View File

@ -0,0 +1,50 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"sync/atomic"
"time"
)
// atomicUpdateFloat atomically updates the float64 value pointed to by bits
// using the provided updateFunc, with an exponential backoff on contention.
func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) {
const (
// both numbers are derived from empirical observations
// documented in this PR: https://github.com/prometheus/client_golang/pull/1661
maxBackoff = 320 * time.Millisecond
initialBackoff = 10 * time.Millisecond
)
backoff := initialBackoff
for {
loadedBits := atomic.LoadUint64(bits)
oldFloat := math.Float64frombits(loadedBits)
newFloat := updateFunc(oldFloat)
newBits := math.Float64bits(newFloat)
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
break
} else {
// Exponential backoff with sleep and cap to avoid infinite wait
time.Sleep(backoff)
backoff *= 2
if backoff > maxBackoff {
backoff = maxBackoff
}
}
}
}
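
The callers migrated later in this compare (counter.Add, gauge.Add, and the summary and histogram sum updates) all reduce to the same pattern. A minimal sketch of such a caller, assuming it lives in the same package so it can reach the unexported helper; the wrapper name is hypothetical, and the real equivalent is atomicAddFloat in histogram.go:

// addToFloat adds v to the float64 whose IEEE 754 bits are stored in *bits,
// retrying with the exponential backoff above on CAS contention.
func addToFloat(bits *uint64, v float64) {
	atomicUpdateFloat(bits, func(old float64) float64 {
		return old + v
	})
}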

View File

@ -0,0 +1,167 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"sync"
"sync/atomic"
"testing"
"unsafe"
)
var output float64
func TestAtomicUpdateFloat(t *testing.T) {
var val float64 = 0.0
bits := (*uint64)(unsafe.Pointer(&val))
var wg sync.WaitGroup
numGoroutines := 100000
increment := 1.0
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func() {
defer wg.Done()
atomicUpdateFloat(bits, func(f float64) float64 {
return f + increment
})
}()
}
wg.Wait()
expected := float64(numGoroutines) * increment
if val != expected {
t.Errorf("Expected %f, got %f", expected, val)
}
}
// Benchmark for atomicUpdateFloat with single goroutine (no contention).
func BenchmarkAtomicUpdateFloat_SingleGoroutine(b *testing.B) {
var val float64 = 0.0
bits := (*uint64)(unsafe.Pointer(&val))
for i := 0; i < b.N; i++ {
atomicUpdateFloat(bits, func(f float64) float64 {
return f + 1.0
})
}
output = val
}
// Benchmark for the old implementation with a single goroutine (no contention), to check the overhead of the backoff.
func BenchmarkAtomicNoBackoff_SingleGoroutine(b *testing.B) {
var val float64 = 0.0
bits := (*uint64)(unsafe.Pointer(&val))
for i := 0; i < b.N; i++ {
for {
loadedBits := atomic.LoadUint64(bits)
newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0)
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
break
}
}
}
output = val
}
// Benchmark varying the number of goroutines.
func benchmarkAtomicUpdateFloatConcurrency(b *testing.B, numGoroutines int) {
var val float64 = 0.0
bits := (*uint64)(unsafe.Pointer(&val))
b.SetParallelism(numGoroutines)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
atomicUpdateFloat(bits, func(f float64) float64 {
return f + 1.0
})
}
})
output = val
}
func benchmarkAtomicNoBackoffFloatConcurrency(b *testing.B, numGoroutines int) {
var val float64 = 0.0
bits := (*uint64)(unsafe.Pointer(&val))
b.SetParallelism(numGoroutines)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
for {
loadedBits := atomic.LoadUint64(bits)
newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0)
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
break
}
}
}
})
output = val
}
func BenchmarkAtomicUpdateFloat_1Goroutine(b *testing.B) {
benchmarkAtomicUpdateFloatConcurrency(b, 1)
}
func BenchmarkAtomicNoBackoff_1Goroutine(b *testing.B) {
benchmarkAtomicNoBackoffFloatConcurrency(b, 1)
}
func BenchmarkAtomicUpdateFloat_2Goroutines(b *testing.B) {
benchmarkAtomicUpdateFloatConcurrency(b, 2)
}
func BenchmarkAtomicNoBackoff_2Goroutines(b *testing.B) {
benchmarkAtomicNoBackoffFloatConcurrency(b, 2)
}
func BenchmarkAtomicUpdateFloat_4Goroutines(b *testing.B) {
benchmarkAtomicUpdateFloatConcurrency(b, 4)
}
func BenchmarkAtomicNoBackoff_4Goroutines(b *testing.B) {
benchmarkAtomicNoBackoffFloatConcurrency(b, 4)
}
func BenchmarkAtomicUpdateFloat_8Goroutines(b *testing.B) {
benchmarkAtomicUpdateFloatConcurrency(b, 8)
}
func BenchmarkAtomicNoBackoff_8Goroutines(b *testing.B) {
benchmarkAtomicNoBackoffFloatConcurrency(b, 8)
}
func BenchmarkAtomicUpdateFloat_16Goroutines(b *testing.B) {
benchmarkAtomicUpdateFloatConcurrency(b, 16)
}
func BenchmarkAtomicNoBackoff_16Goroutines(b *testing.B) {
benchmarkAtomicNoBackoffFloatConcurrency(b, 16)
}
func BenchmarkAtomicUpdateFloat_32Goroutines(b *testing.B) {
benchmarkAtomicUpdateFloatConcurrency(b, 32)
}
func BenchmarkAtomicNoBackoff_32Goroutines(b *testing.B) {
benchmarkAtomicNoBackoffFloatConcurrency(b, 32)
}

View File

@ -134,13 +134,9 @@ func (c *counter) Add(v float64) {
return
}
for {
oldBits := atomic.LoadUint64(&c.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
return
}
}
atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 {
return oldVal + v
})
}
func (c *counter) AddWithExemplar(v float64, e Labels) {

View File

@ -14,7 +14,6 @@
package prometheus
import (
"fmt"
"math"
"strings"
"testing"
@ -120,10 +119,10 @@ func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {
expectPanic(t, func() {
counterVec.WithLabelValues(labelValues...)
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
}, "WithLabelValues: expected panic because: "+test.desc)
expectPanic(t, func() {
counterVec.With(test.labels)
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
}, "WithLabelValues: expected panic because: "+test.desc)
if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {
t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc)

View File

@ -57,6 +57,9 @@ type Desc struct {
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
id uint64
// escapedID is similar to id, but with the metric and label names escaped
// with underscores.
escapedID uint64
// dimHash is a hash of the label names (preset and variable) and the
// Help string. Each Desc with the same fqName must have the same
// dimHash.
@ -142,11 +145,18 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
}
xxh := xxhash.New()
for _, val := range labelValues {
escapedXXH := xxhash.New()
for i, val := range labelValues {
xxh.WriteString(val)
xxh.Write(separatorByteSlice)
if i == 0 {
val = model.EscapeName(val, model.UnderscoreEscaping)
}
escapedXXH.WriteString(val)
escapedXXH.Write(separatorByteSlice)
}
d.id = xxh.Sum64()
d.escapedID = escapedXXH.Sum64()
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
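
Concretely, escapedID only differs from id when the fully qualified name contains characters that are invalid under the legacy charset. A small sketch of the escaping that feeds the second hash, using the same model.EscapeName / model.UnderscoreEscaping identifiers as above (the metric names are taken from the registry tests later in this compare):

// "my.counter.a" and "my_counter_a" produce different ids but the same
// escapedID, because underscore escaping maps the dots to underscores.
a := model.EscapeName("my.counter.a", model.UnderscoreEscaping) // "my_counter_a"
b := model.EscapeName("my_counter_a", model.UnderscoreEscaping) // unchanged
_ = a == b // true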

View File

@ -120,13 +120,9 @@ func (g *gauge) Dec() {
}
func (g *gauge) Add(val float64) {
for {
oldBits := atomic.LoadUint64(&g.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
return
}
}
atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 {
return oldVal + val
})
}
func (g *gauge) Sub(val float64) {

View File

@ -858,15 +858,35 @@ func (h *histogram) Write(out *dto.Metric) error {
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
n := len(h.upperBounds)
if n == 0 {
return 0
}
// Early exit: if v is less than or equal to the first upper bound, return 0
if v <= h.upperBounds[0] {
return 0
}
// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
if v > h.upperBounds[n-1] {
return n
}
// For small arrays, use simple linear search
// "magic number" 35 is result of tests on couple different (AWS and baremetal) servers
// see more details here: https://github.com/prometheus/client_golang/pull/1662
if n < 35 {
for i, bound := range h.upperBounds {
if v <= bound {
return i
}
}
// If v is greater than all upper bounds, return len(h.upperBounds)
return n
}
// For larger arrays, use stdlib's binary search
return sort.SearchFloat64s(h.upperBounds, v)
}
@ -1621,13 +1641,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
// atomicAddFloat adds the provided float atomically to another float
// represented by the bit pattern the bits pointer is pointing to.
func atomicAddFloat(bits *uint64, v float64) {
for {
loadedBits := atomic.LoadUint64(bits)
newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
break
}
}
atomicUpdateFloat(bits, func(oldVal float64) float64 {
return oldVal + v
})
}
// atomicDecUint32 atomically decrements the uint32 p points to. See
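
Since prometheus.DefBuckets has only 11 upper bounds, a default histogram always takes the new linear path; the binary search only kicks in at 35 or more bounds. A quick in-package sketch of the return-value semantics the tests below exercise, mirroring how the benchmarks construct a histogram directly (bounds and observations chosen for illustration):

h := &histogram{upperBounds: []float64{0.1, 0.5, 1, 5}} // 4 bounds, +Inf implicit
_ = h.findBucket(0.05) // 0: v is at or below the first bound (early exit)
_ = h.findBucket(0.5)  // 1: bounds are inclusive upper limits
_ = h.findBucket(3)    // 3: linear scan finds the bound 5
_ = h.findBucket(10)   // 4 == len(upperBounds): the +Inf bucket (early exit)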

View File

@ -1455,3 +1455,91 @@ func compareNativeExemplarValues(t *testing.T, exps []*dto.Exemplar, values []fl
}
}
}
var resultFindBucket int
func benchmarkFindBucket(b *testing.B, l int) {
h := &histogram{upperBounds: make([]float64, l)}
for i := range h.upperBounds {
h.upperBounds[i] = float64(i)
}
v := float64(l / 2)
b.ResetTimer()
for i := 0; i < b.N; i++ {
resultFindBucket = h.findBucket(v)
}
}
func BenchmarkFindBucketShort(b *testing.B) {
benchmarkFindBucket(b, 20)
}
func BenchmarkFindBucketMid(b *testing.B) {
benchmarkFindBucket(b, 40)
}
func BenchmarkFindBucketLarge(b *testing.B) {
benchmarkFindBucket(b, 100)
}
func BenchmarkFindBucketHuge(b *testing.B) {
benchmarkFindBucket(b, 500)
}
func BenchmarkFindBucketInf(b *testing.B) {
h := &histogram{upperBounds: make([]float64, 500)}
for i := range h.upperBounds {
h.upperBounds[i] = float64(i)
}
v := 1000.5
b.ResetTimer()
for i := 0; i < b.N; i++ {
resultFindBucket = h.findBucket(v)
}
}
func BenchmarkFindBucketLow(b *testing.B) {
h := &histogram{upperBounds: make([]float64, 500)}
for i := range h.upperBounds {
h.upperBounds[i] = float64(i)
}
v := -1.1
b.ResetTimer()
for i := 0; i < b.N; i++ {
resultFindBucket = h.findBucket(v)
}
}
func TestFindBucket(t *testing.T) {
smallHistogram := &histogram{upperBounds: []float64{1, 2, 3, 4, 5}}
largeHistogram := &histogram{upperBounds: make([]float64, 50)}
for i := range largeHistogram.upperBounds {
largeHistogram.upperBounds[i] = float64(i)
}
tests := []struct {
h *histogram
v float64
expected int
}{
{smallHistogram, -1, 0},
{smallHistogram, 0.5, 0},
{smallHistogram, 2.5, 2},
{smallHistogram, 5.5, 5},
{largeHistogram, -1, 0},
{largeHistogram, 25.5, 26},
{largeHistogram, 49.5, 50},
{largeHistogram, 50.5, 50},
{largeHistogram, 5000.5, 50},
}
for _, tt := range tests {
result := tt.h.findBucket(tt.v)
if result != tt.expected {
t.Errorf("findBucket(%v) = %d; expected %d", tt.v, result, tt.expected)
}
}
}

View File

@ -22,6 +22,7 @@ import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
return strconv.Itoa(beginning)
}
if length == 0 {
beginning-- // empty ranges begin at line just before the range

View File

@ -15,12 +15,16 @@
#include <mach/mach_init.h>
#include <mach/task.h>
// Compiler warns that shared_memory_server.h is deprecated, use this instead.
// But this doesn't define SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE.
//#include <mach/shared_region.h>
#include <mach/shared_memory_server.h>
#include <mach/mach_vm.h>
// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
// mach/shared_region.h instead. But that doesn't define
// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
// avoid a warning message when running tests.
#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
#define SHARED_DATA_REGION_SIZE 0x10000000
#define SHARED_TEXT_REGION_SIZE 0x10000000
int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
{

View File

@ -22,9 +22,7 @@ import "C"
import "fmt"
func getMemory() (*memoryInfo, error) {
var (
rss, vsize C.ulonglong
)
var rss, vsize C.ulonglong
if err := C.get_memory_info(&rss, &vsize); err != 0 {
return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))

View File

@ -43,6 +43,7 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
@ -121,6 +122,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
if opts.MaxRequestsInFlight > 0 {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}
var hasEscapedCollisions bool
if opts.Registry != nil {
// Initialize all possibilities that can occur below.
errCnt.WithLabelValues("gathering")
@ -134,6 +136,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
}
}
hasEscapedCollisions = reg.HasEscapedCollision()
// Select compression formats to offer based on default or user choice.
var compressions []string
@ -190,6 +193,19 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} else {
contentType = expfmt.Negotiate(req.Header)
}
if hasEscapedCollisions {
switch contentType.ToEscapingScheme() {
case model.UnderscoreEscaping, model.DotsEscaping:
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error: one or more metrics collide when escaped")
}
httpError(rsp, fmt.Errorf("one or more metrics collide when escaped"))
return
default:
}
}
rsp.Header().Set(contentTypeHeader, string(contentType))
w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)

View File

@ -28,6 +28,8 @@ import (
"github.com/klauspost/compress/zstd"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/client_golang/prometheus"
)
@ -79,6 +81,10 @@ func (g *mockTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(),
return mfs, func() { g.doneInvoked++ }, err
}
func (g *mockTransactionGatherer) HasEscapedCollision() bool {
return g.g.HasEscapedCollision()
}
func readCompressedBody(r io.Reader, comp Compression) (string, error) {
switch comp {
case Gzip:
@ -98,7 +104,7 @@ func readCompressedBody(r io.Reader, comp Compression) (string, error) {
got, err := io.ReadAll(reader)
return string(got), err
}
return "", fmt.Errorf("Unsupported compression")
return "", errors.New("Unsupported compression")
}
func TestHandlerErrorHandling(t *testing.T) {
@ -131,7 +137,7 @@ func TestHandlerErrorHandling(t *testing.T) {
logger := log.New(logBuf, "", 0)
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add("Accept", "test/plain")
mReg := &mockTransactionGatherer{g: reg}
@ -252,7 +258,7 @@ func TestInstrumentMetricHandler(t *testing.T) {
// Do it again to test idempotency.
InstrumentMetricHandler(reg, HandlerForTransactional(mReg, HandlerOpts{}))
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add(acceptHeader, acceptTextPlain)
handler.ServeHTTP(writer, request)
@ -311,7 +317,7 @@ func TestHandlerMaxRequestsInFlight(t *testing.T) {
w1 := httptest.NewRecorder()
w2 := httptest.NewRecorder()
w3 := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add(acceptHeader, acceptTextPlain)
c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}
@ -348,7 +354,7 @@ func TestHandlerTimeout(t *testing.T) {
handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond})
w := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add("Accept", "test/plain")
c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)}
@ -372,7 +378,7 @@ func TestInstrumentMetricHandlerWithCompression(t *testing.T) {
handler := InstrumentMetricHandler(reg, HandlerForTransactional(mReg, HandlerOpts{DisableCompression: false}))
compression := Zstd
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add(acceptHeader, acceptTextPlain)
request.Header.Add(acceptEncodingHeader, string(compression))
@ -533,7 +539,7 @@ func TestNegotiateEncodingWriter(t *testing.T) {
}
for _, test := range testCases {
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add(acceptEncodingHeader, test.acceptEncoding)
rr := httptest.NewRecorder()
_, encodingHeader, _, err := negotiateEncodingWriter(request, rr, test.offeredCompressions)
@ -548,6 +554,50 @@ func TestNegotiateEncodingWriter(t *testing.T) {
}
}
func TestEscapedCollisions(t *testing.T) {
oldScheme := model.NameValidationScheme
defer func() {
model.NameValidationScheme = oldScheme
}()
model.NameValidationScheme = model.UTF8Validation
reg := prometheus.NewRegistry()
reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
Name: "test_metric",
Help: "A test metric with underscores",
}))
reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
Name: "test.metric",
Help: "A test metric with dots",
}))
handler := HandlerFor(reg, HandlerOpts{})
t.Run("fail case", func(t *testing.T) {
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/metrics", nil)
request.Header.Add(acceptHeader, string(expfmt.NewFormat(expfmt.TypeTextPlain)))
handler.ServeHTTP(writer, request)
if writer.Code != 500 {
t.Errorf("wanted error 500, got %d", writer.Code)
}
expectErr := "An error has occurred while serving metrics:\n\none or more metrics collide when escaped\n"
if writer.Body.String() != expectErr {
t.Error("incorrect body returned, want " + expectErr + " got " + writer.Body.String())
}
})
t.Run("success case", func(t *testing.T) {
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/metrics", nil)
request.Header.Add(acceptHeader, string(expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)))
handler.ServeHTTP(writer, request)
if writer.Code != 200 {
t.Errorf("wanted 200 OK, got %d", writer.Code)
}
})
}
func BenchmarkCompression(b *testing.B) {
benchmarks := []struct {
name string
@ -631,7 +681,7 @@ func BenchmarkCompression(b *testing.B) {
b.Run(benchmark.name+"_"+size.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
request.Header.Add(acceptEncodingHeader, benchmark.compressionType)
handler.ServeHTTP(writer, request)
}

View File

@ -223,7 +223,7 @@ func TestClientMiddlewareAPI_WithRequestContext(t *testing.T) {
}))
defer backend.Close()
req, err := http.NewRequest("GET", backend.URL, nil)
req, err := http.NewRequest(http.MethodGet, backend.URL, nil)
if err != nil {
t.Fatalf("%v", err)
}
@ -276,7 +276,7 @@ func TestClientMiddlewareAPIWithRequestContextTimeout(t *testing.T) {
}))
defer backend.Close()
req, err := http.NewRequest("GET", backend.URL, nil)
req, err := http.NewRequest(http.MethodGet, backend.URL, nil)
if err != nil {
t.Fatalf("%v", err)
}

View File

@ -418,7 +418,7 @@ func TestMiddlewareAPI(t *testing.T) {
_, _ = w.Write([]byte("OK"))
})
r, _ := http.NewRequest("GET", "www.example.com", nil)
r, _ := http.NewRequest(http.MethodGet, "www.example.com", nil)
w := httptest.NewRecorder()
chain.ServeHTTP(w, r)
@ -432,7 +432,7 @@ func TestMiddlewareAPI_WithExemplars(t *testing.T) {
_, _ = w.Write([]byte("OK"))
}, WithExemplarFromContext(func(_ context.Context) prometheus.Labels { return exemplar }))
r, _ := http.NewRequest("GET", "www.example.com", nil)
r, _ := http.NewRequest(http.MethodGet, "www.example.com", nil)
w := httptest.NewRecorder()
chain.ServeHTTP(w, r)

View File

@ -66,12 +66,18 @@ func init() {
// pre-registered.
func NewRegistry() *Registry {
return &Registry{
collectorsByID: map[uint64]Collector{},
descIDs: map[uint64]struct{}{},
dimHashesByName: map[string]uint64{},
collectorsByID: map[uint64]Collector{},
collectorsByEscapedID: map[uint64]Collector{},
descIDs: map[uint64]struct{}{},
escapedDescIDs: map[uint64]struct{}{},
dimHashesByName: map[string]uint64{},
}
}
func (r *Registry) HasEscapedCollision() bool {
return r.hasEscapedCollision
}
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry. Unchecked Collectors (those whose
@ -158,6 +164,11 @@ type Gatherer interface {
// expose an incomplete result and instead disregard the returned
// MetricFamily protobufs in case the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error)
// HasEscapedCollision returns true if any two of the registered metrics would
// be the same when escaped to underscores. This is needed to prevent
// duplicate metric issues when being scraped by a legacy system.
HasEscapedCollision() bool
}
// Register registers the provided Collector with the DefaultRegisterer.
@ -194,6 +205,10 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
return gf()
}
func (gf GathererFunc) HasEscapedCollision() bool {
return false
}
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
@ -258,22 +273,36 @@ func (errs MultiError) MaybeUnwrap() error {
// Registry implements Collector to allow it to be used for creating groups of
// metrics. See the Grouping example for how this can be done.
type Registry struct {
mtx sync.RWMutex
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
mtx sync.RWMutex
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
// collectorsByEscapedID stores collectors by escapedID, only if the escaped ID is
// different (otherwise we can just do the lookup in the regular map).
collectorsByEscapedID map[uint64]Collector
descIDs map[uint64]struct{}
// escapedDescIDs records desc ids of the escaped version of the metric, only
// if different from the regular name.
escapedDescIDs map[uint64]struct{}
dimHashesByName map[string]uint64
uncheckedCollectors []Collector
pedanticChecksEnabled bool
// hasEscapedCollision is set to true if any two metrics that were not
// identical under UTF-8 would collide if scraped by a system that requires
// names to be escaped to legacy underscore replacement.
hasEscapedCollision bool
}
// Register implements Registerer.
func (r *Registry) Register(c Collector) error {
var (
descChan = make(chan *Desc, capDescChan)
newDescIDs = map[uint64]struct{}{}
newDimHashesByName = map[string]uint64{}
collectorID uint64 // All desc IDs XOR'd together.
duplicateDescErr error
descChan = make(chan *Desc, capDescChan)
newDescIDs = map[uint64]struct{}{}
newEscapedIDs = map[uint64]struct{}{}
newDimHashesByName = map[string]uint64{}
collectorID uint64 // All desc IDs XOR'd together.
escapedID uint64
duplicateDescErr error
duplicateEscapedDesc bool
)
go func() {
c.Describe(descChan)
@ -307,6 +336,22 @@ func (r *Registry) Register(c Collector) error {
collectorID ^= desc.id
}
// Also check to see if the descID is unique when all the names are escaped
// to underscores. First check the primary map, then check the secondary
// map. We only officially log a collision later.
if _, exists := r.descIDs[desc.escapedID]; exists {
duplicateEscapedDesc = true
}
if _, exists := r.escapedDescIDs[desc.escapedID]; exists {
duplicateEscapedDesc = true
}
if _, exists := newEscapedIDs[desc.escapedID]; !exists {
if desc.escapedID != desc.id {
newEscapedIDs[desc.escapedID] = struct{}{}
}
escapedID ^= desc.escapedID
}
// Are all the label names and the help string consistent with
// previous descriptors of the same name?
// First check existing descriptors...
@ -331,7 +376,17 @@ func (r *Registry) Register(c Collector) error {
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
return nil
}
if existing, exists := r.collectorsByID[collectorID]; exists {
existing, collision := r.collectorsByID[collectorID]
// Also check whether the underscore-escaped versions of the IDs match.
if !collision {
_, escapedCollision := r.collectorsByID[escapedID]
r.hasEscapedCollision = r.hasEscapedCollision || escapedCollision
_, escapedCollision = r.collectorsByEscapedID[escapedID]
r.hasEscapedCollision = r.hasEscapedCollision || escapedCollision
}
if collision {
switch e := existing.(type) {
case *wrappingCollector:
return AlreadyRegisteredError{
@ -351,23 +406,36 @@ func (r *Registry) Register(c Collector) error {
return duplicateDescErr
}
if duplicateEscapedDesc {
r.hasEscapedCollision = true
}
// Only after all tests have passed, actually register.
r.collectorsByID[collectorID] = c
// We only need to store the escapedID if it doesn't match the unescaped one.
if escapedID != collectorID {
r.collectorsByEscapedID[escapedID] = c
}
for hash := range newDescIDs {
r.descIDs[hash] = struct{}{}
}
for name, dimHash := range newDimHashesByName {
r.dimHashesByName[name] = dimHash
}
for hash := range newEscapedIDs {
r.escapedDescIDs[hash] = struct{}{}
}
return nil
}
// Unregister implements Registerer.
func (r *Registry) Unregister(c Collector) bool {
var (
descChan = make(chan *Desc, capDescChan)
descIDs = map[uint64]struct{}{}
collectorID uint64 // All desc IDs XOR'd together.
descChan = make(chan *Desc, capDescChan)
descIDs = map[uint64]struct{}{}
escapedDescIDs = map[uint64]struct{}{}
collectorID uint64 // All desc IDs XOR'd together.
collectorEscapedID uint64
)
go func() {
c.Describe(descChan)
@ -377,6 +445,8 @@ func (r *Registry) Unregister(c Collector) bool {
if _, exists := descIDs[desc.id]; !exists {
collectorID ^= desc.id
descIDs[desc.id] = struct{}{}
collectorEscapedID ^= desc.escapedID
escapedDescIDs[desc.escapedID] = struct{}{}
}
}
@ -391,9 +461,13 @@ func (r *Registry) Unregister(c Collector) bool {
defer r.mtx.Unlock()
delete(r.collectorsByID, collectorID)
delete(r.collectorsByEscapedID, collectorEscapedID)
for id := range descIDs {
delete(r.descIDs, id)
}
for id := range escapedDescIDs {
delete(r.escapedDescIDs, id)
}
// dimHashesByName is left untouched as those must be consistent
// throughout the lifetime of a program.
return true
@ -802,6 +876,15 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
func (gs Gatherers) HasEscapedCollision() bool {
for _, g := range gs {
if g.HasEscapedCollision() {
return true
}
}
return false
}
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
// Prometheus text format and the internal metric representation of the
// Prometheus server add while flattening Summaries and Histograms.
@ -1033,6 +1116,15 @@ func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err err
}, errs.MaybeUnwrap()
}
func (r *MultiTRegistry) HasEscapedCollision() bool {
for _, g := range r.tGatherers {
if g.HasEscapedCollision() {
return true
}
}
return false
}
// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory
// used by metric family is no longer used by a caller. This allows implementations with cache.
type TransactionalGatherer interface {
@ -1058,6 +1150,11 @@ type TransactionalGatherer interface {
// Important: done is expected to be triggered (even if the error occurs!)
// once caller does not need returned slice of dto.MetricFamily.
Gather() (_ []*dto.MetricFamily, done func(), err error)
// HasEscapedCollision returns true if any two of the registered metrics would
// be the same when escaped to underscores. This is needed to prevent
// duplicate metric issues when being scraped by a legacy system.
HasEscapedCollision() bool
}
// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function.
@ -1074,3 +1171,7 @@ func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), er
mfs, err := g.g.Gather()
return mfs, func() {}, err
}
func (g *noTransactionGatherer) HasEscapedCollision() bool {
return g.g.HasEscapedCollision()
}
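
Putting the registry-side pieces together, a minimal sketch of how a caller observes the new flag (metric names mirror the test cases below; UTF-8 validation must be enabled, or registering the dotted name would fail):

model.NameValidationScheme = model.UTF8Validation
reg := prometheus.NewRegistry()
reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{Name: "my_counter_a"}))
reg.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{Name: "my.counter.a"})) // registers fine under UTF-8
fmt.Println(reg.HasEscapedCollision()) // true: both names escape to "my_counter_a"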

View File

@ -27,6 +27,7 @@ import (
"net/http"
"net/http/httptest"
"os"
"strconv"
"sync"
"testing"
"time"
@ -36,6 +37,7 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
@ -713,7 +715,7 @@ collected metric "broken_metric" { label:<name:"foo" value:"bar" > label:<name:"
}
writer := httptest.NewRecorder()
handler := promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})
request, _ := http.NewRequest("GET", "/", nil)
request, _ := http.NewRequest(http.MethodGet, "/", nil)
for key, value := range scenario.headers {
request.Header.Add(key, value)
}
@ -1181,6 +1183,175 @@ func TestAlreadyRegisteredCollision(t *testing.T) {
}
}
func TestAlreadyRegisteredEscapingCollision(t *testing.T) {
oldValidation := model.NameValidationScheme
model.NameValidationScheme = model.UTF8Validation
defer func() {
model.NameValidationScheme = oldValidation
}()
tests := []struct {
name string
// These are functions because hashes that determine collision are created
// at metric creation time.
counterA func() prometheus.Counter
counterB func() prometheus.Counter
expectErr bool
expectLegacyCollision bool
}{
{
name: "no metric name collision",
counterA: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
counterB: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "myAcounterAa",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
},
{
name: "compatibility metric name collision",
counterA: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
counterB: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my.counter.a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
expectLegacyCollision: true,
},
{
// This is a regression test to make sure we are not accidentally
// reporting collisions when label values are different.
name: "no label value collision",
counterA: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"name": "label.value",
"type": "test",
},
})
},
counterB: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"name": "label_value",
"type": "test",
},
})
},
},
{
name: "compatibility label name collision",
counterA: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"label.name": "name",
"type": "test",
},
})
},
counterB: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"label_name": "name",
"type": "test",
},
})
},
expectErr: true,
expectLegacyCollision: false,
},
{
name: "no utf8 metric name collision",
counterA: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my_counter_a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
counterB: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my.counter.a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
expectLegacyCollision: true,
},
{
name: "post init flag flip, should collide",
counterA: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my.counter.a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
counterB: func() prometheus.Counter {
return prometheus.NewCounter(prometheus.CounterOpts{
Name: "my.counter.a",
ConstLabels: prometheus.Labels{
"name": "label",
"type": "test",
},
})
},
expectErr: true,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
reg := prometheus.NewRegistry()
err := reg.Register(tc.counterA())
if err != nil {
t.Errorf("required no error, got %v", err)
}
err = reg.Register(tc.counterB())
if tc.expectErr != (err != nil) {
t.Errorf("required error state %v, got %v", tc.expectErr, err)
}
if tc.expectLegacyCollision != reg.HasEscapedCollision() {
t.Errorf("legacy collision mismatch, want %v got %v", tc.expectLegacyCollision, reg.HasEscapedCollision())
}
})
}
}
type tGatherer struct {
done bool
err error
@ -1194,6 +1365,10 @@ func (g *tGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
}, func() { g.done = true }, g.err
}
func (g *tGatherer) HasEscapedCollision() bool {
return false
}
func TestNewMultiTRegistry(t *testing.T) {
treg := &tGatherer{}
@ -1282,7 +1457,7 @@ func ExampleRegistry_grouping() {
ConstLabels: prometheus.Labels{
// Generate a label unique to this worker so its metric doesn't
// collide with the metrics from other workers.
"worker_id": fmt.Sprintf("%d", workerID),
"worker_id": strconv.Itoa(workerID),
},
})
workerReg.MustRegister(workTime)

View File

@ -471,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
n := atomic.AddUint64(&s.countAndHotIdx, 1)
hotCounts := s.counts[n>>63]
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
return oldVal + v
})
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
@ -519,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
// Finally add all the cold counts to the new hot counts and reset the cold counts.
atomic.AddUint64(&hotCounts.count, count)
atomic.StoreUint64(&coldCounts.count, 0)
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
atomic.StoreUint64(&coldCounts.sumBits, 0)
break
}
}
// Use atomicUpdateFloat to update hotCounts.sumBits atomically.
atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
return oldVal + sum.GetSampleSum()
})
atomic.StoreUint64(&coldCounts.sumBits, 0)
return nil
}

View File

@ -14,6 +14,7 @@
package promlint_test
import (
"errors"
"fmt"
"reflect"
"strings"
@ -733,7 +734,7 @@ request_duration_seconds{httpService="foo"} 10
func TestLintUnitAbbreviations(t *testing.T) {
genTest := func(n string) test {
return test{
name: fmt.Sprintf("%s with abbreviated unit", n),
name: n + " with abbreviated unit",
in: fmt.Sprintf(`
# HELP %s Test metric.
# TYPE %s gauge
@ -820,7 +821,7 @@ mc_something_total 10
prefixValidation := func(mf *dto.MetricFamily) []error {
if !strings.HasPrefix(mf.GetName(), "memcached_") {
return []error{fmt.Errorf("expected metric name to start with 'memcached_'")}
return []error{errors.New("expected metric name to start with 'memcached_'")}
}
return nil
}

View File

@ -14,7 +14,7 @@
package validations
import (
"fmt"
"errors"
"reflect"
dto "github.com/prometheus/client_model/go"
@ -27,7 +27,7 @@ func LintDuplicateMetric(mf *dto.MetricFamily) []error {
for i, m := range mf.Metric {
for _, k := range mf.Metric[i+1:] {
if reflect.DeepEqual(m.Label, k.Label) {
problems = append(problems, fmt.Errorf("metric not unique"))
problems = append(problems, errors.New("metric not unique"))
break
}
}

View File

@ -14,7 +14,6 @@
package prometheus
import (
"fmt"
"testing"
"time"
@ -51,7 +50,7 @@ func TestNewConstMetricInvalidLabelValues(t *testing.T) {
expectPanic(t, func() {
MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF")
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
}, "WithLabelValues: expected panic because: "+test.desc)
if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil {
t.Errorf("NewConstMetric: expected error because: %s", test.desc)

View File

@ -16,6 +16,7 @@ package prometheus
import (
"fmt"
"reflect"
"strconv"
"testing"
dto "github.com/prometheus/client_model/go"
@ -291,7 +292,7 @@ func testMetricVec(t *testing.T, vec *GaugeVec) {
expected := map[[2]string]int{}
for i := 0; i < 1000; i++ {
pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples.
pair[0], pair[1] = strconv.Itoa(i%4), strconv.Itoa(i%5) // Varying combinations multiples.
expected[pair]++
vec.WithLabelValues(pair[0], pair[1]).Inc()
@ -363,7 +364,7 @@ func testConstrainedMetricVec(t *testing.T, vec *GaugeVec, constrain func(string
expected := map[[2]string]int{}
for i := 0; i < 1000; i++ {
pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples.
pair[0], pair[1] = strconv.Itoa(i%4), strconv.Itoa(i%5) // Varying combinations multiples.
expected[[2]string{pair[0], constrain(pair[1])}]++
vec.WithLabelValues(pair[0], pair[1]).Inc()