Enable same linters as the Prometheus repo itself (#1056)

* Add gofumpt to GitHub workflow & fix all files for it

Signed-off-by: sazary <soroosh@azary.ir>
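
gofumpt is a stricter superset of gofmt, and most of the mechanical changes in this diff follow its extra rules: 0o-prefixed octal literals, grouping adjacent parameters of the same type, and not wrapping a single declaration in a parenthesized var block. A minimal sketch of those rewrites, with illustrative names that are not taken from this repository:

package main

import (
	"fmt"
	"os"
)

// gofumpt prefers a plain declaration over a one-element var ( ... ) block.
var greeting = "hello"

// Adjacent parameters of the same type are grouped: (name, suffix string)
// rather than (name string, suffix string).
func greet(name, suffix string) string {
	return fmt.Sprintf("%s %s%s", greeting, name, suffix)
}

func main() {
	// Octal literals use the 0o prefix instead of a bare leading zero.
	if err := os.WriteFile("greeting.txt", []byte(greet("world", "!")), 0o644); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}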

* Add goimports to golangci & fix its issues

Signed-off-by: sazary <soroosh@azary.ir>
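
goimports, configured further down with local-prefixes: github.com/prometheus/client_golang, sorts imports and keeps standard-library, third-party, and local packages in separate groups, which is what the import reshuffles in this diff amount to. A hypothetical example of the resulting grouping (the helper below is illustrative, not part of the library):

package example

import (
	"context"

	"github.com/prometheus/common/model"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

// newAPI shows the grouping: standard library first, then third-party
// modules, then imports under the configured local prefix.
func newAPI(addr string) (v1.API, error) {
	client, err := api.NewClient(api.Config{Address: addr})
	if err != nil {
		return nil, err
	}
	// Touch the remaining imports so the example compiles on its own.
	_ = model.Now()
	_ = context.Background()
	return v1.NewAPI(client), nil
}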

* Add revive to golangci & fix its issues

Signed-off-by: sazary <soroosh@azary.ir>
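
revive is a faster, configurable replacement for golint. Its indent-error-flow rule is the likely reason the graphite bridge change further down drops an else branch whose if already returns; a small sketch of that rewrite with made-up names:

package main

import "fmt"

// Before: revive flags the redundant else because the if branch returns.
func parityBefore(n int) string {
	if n%2 == 0 {
		return "even"
	} else {
		return "odd"
	}
}

// After: drop the else and outdent the remaining return.
func parityAfter(n int) string {
	if n%2 == 0 {
		return "even"
	}
	return "odd"
}

func main() {
	fmt.Println(parityBefore(2), parityAfter(3))
}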

* Add errcheck & misspell to golangci and fix their issues

Signed-off-by: sazary <soroosh@azary.ir>
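
errcheck reports error return values that are silently discarded; the push and testutil changes below now handle the results of enc.Encode and m.Write, and the new scripts/errcheck_excludes.txt whitelists writers whose errors are always nil. misspell simply catches spelling mistakes in comments and strings. A minimal sketch of the errcheck-driven pattern, using a hypothetical helper rather than code from this repository:

package main

import (
	"fmt"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// encodeAll is a hypothetical helper that no longer drops the encoder error.
func encodeAll(families []*dto.MetricFamily) error {
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
	for _, mf := range families {
		// errcheck insists this error is handled (or explicitly excluded).
		if err := enc.Encode(mf); err != nil {
			return fmt.Errorf("failed to encode metric family %s: %w", mf.GetName(), err)
		}
	}
	return nil
}

func main() {
	if err := encodeAll(nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}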

* Add govet & gosimple to golangci and fix their issues

Signed-off-by: sazary <soroosh@azary.ir>
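
govet is the standard suspicious-construct checker, and gosimple suggests the mechanical simplifications visible in the difflib and pusher changes further down: dropping the blank identifier from range loops, strings.TrimSuffix instead of a HasSuffix check plus slicing, and buf.String() instead of string(buf.Bytes()). A short sketch of those rewrites on made-up data:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	seen := map[string]int{"a": 1, "b": 2}

	count := 0
	// S1005: "for k, _ := range seen" becomes "for k := range seen".
	for k := range seen {
		_ = k
		count++
	}

	// TrimSuffix replaces the manual HasSuffix check and re-slice.
	url := strings.TrimSuffix("http://example.org/", "/")

	// S1030: buf.String() replaces string(buf.Bytes()).
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%s has %d keys", url, count)
	fmt.Println(buf.String())
}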

* Enable all default linters of golangci

Signed-off-by: sazary <soroosh@azary.ir>
Soroosh Azary Marhabi 2022-06-17 11:34:06 +04:30 committed by GitHub
parent ebd77f0360
commit 2cfd1eb960
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
39 changed files with 179 additions and 146 deletions

View File

@ -1,3 +1,4 @@
---
name: golangci-lint
on:
push:

View File

@ -1,5 +1,31 @@
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
---
run:
  deadline: 5m
output:
  sort-results: true
linters:
  enable:
  - staticcheck
  disable-all: true
    - gofumpt
    - goimports
    - revive
    - misspell
issues:
  max-same-issues: 0
  exclude-rules:
    - path: _test.go
      linters:
        - errcheck
        - govet
        - structcheck
linters-settings:
  errcheck:
    exclude: scripts/errcheck_excludes.txt
  goimports:
    local-prefixes: github.com/prometheus/client_golang
  gofumpt:
    extra-rules: true

View File

@ -134,7 +134,6 @@ func BenchmarkClient(b *testing.B) {
for _, sizeKB := range []int{4, 50, 1000, 2000} {
b.Run(fmt.Sprintf("%dKB", sizeKB), func(b *testing.B) {
testServer := httptest.NewServer(serveSpaces{sizeKB})
defer testServer.Close()

View File

@ -109,7 +109,6 @@ func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) {
stream.WriteRaw(`"`)
stream.WriteArrayEnd()
}
func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
@ -230,25 +229,25 @@ type API interface {
// Config returns the current Prometheus configuration.
Config(ctx context.Context) (ConfigResult, error)
// DeleteSeries deletes data for a selection of series in a time range.
DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error
DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error
// Flags returns the flag values that Prometheus was launched with.
Flags(ctx context.Context) (FlagsResult, error)
// LabelNames returns the unique label names present in the block in sorted order by given time range and matchers.
LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error)
LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error)
// LabelValues performs a query for the values of the given label, time range and matchers.
LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error)
LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error)
// Query performs a query for the given time.
Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error)
// QueryRange performs a query for the given range.
QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error)
// QueryExemplars performs a query for exemplars by the given query and time range.
QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error)
QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error)
// Buildinfo returns various build information properties about the Prometheus server
Buildinfo(ctx context.Context) (BuildinfoResult, error)
// Runtimeinfo returns the various runtime information properties about the Prometheus server.
Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error)
// Series finds series by label matchers.
Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error)
Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error)
// Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
// under the TSDB's data directory and returns the directory as response.
Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
@ -257,9 +256,9 @@ type API interface {
// Targets returns an overview of the current state of the Prometheus target discovery.
Targets(ctx context.Context) (TargetsResult, error)
// TargetsMetadata returns metadata about metrics currently scraped by the target.
TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error)
TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error)
// Metadata returns metadata about metrics currently scraped by the metric name.
Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error)
Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error)
// TSDB returns the cardinality statistics.
TSDB(ctx context.Context) (TSDBResult, error)
// WalReplay returns the current replay status of the wal.
@ -699,7 +698,7 @@ func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) {
return res, json.Unmarshal(body, &res)
}
func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error {
u := h.client.URL(epDeleteSeries, nil)
q := u.Query()
@ -772,7 +771,7 @@ func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) {
return res, json.Unmarshal(body, &res)
}
func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error) {
func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) {
u := h.client.URL(epLabels, nil)
q := u.Query()
q.Set("start", formatTime(startTime))
@ -795,7 +794,7 @@ func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime ti
return labelNames, w, json.Unmarshal(body, &labelNames)
}
func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) {
func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) {
u := h.client.URL(epLabelValues, map[string]string{"name": label})
q := u.Query()
q.Set("start", formatTime(startTime))
@ -833,7 +832,6 @@ func WithTimeout(timeout time.Duration) Option {
}
func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) {
u := h.client.URL(epQuery, nil)
q := u.Query()
@ -890,7 +888,7 @@ func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ..
return model.Value(qres.v), warnings, json.Unmarshal(body, &qres)
}
func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) {
func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) {
u := h.client.URL(epSeries, nil)
q := u.Query()
@ -973,7 +971,7 @@ func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
return res, json.Unmarshal(body, &res)
}
func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) {
func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error) {
u := h.client.URL(epTargetsMetadata, nil)
q := u.Query()
@ -997,7 +995,7 @@ func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metri
return res, json.Unmarshal(body, &res)
}
func (h *httpAPI) Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) {
func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) {
u := h.client.URL(epMetadata, nil)
q := u.Query()
@ -1054,7 +1052,7 @@ func (h *httpAPI) WalReplay(ctx context.Context) (WalReplayStatus, error) {
return res, json.Unmarshal(body, &res)
}
func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) {
func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error) {
u := h.client.URL(epQueryExemplars, nil)
q := u.Query()
@ -1162,7 +1160,6 @@ func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Respon
}
return resp, []byte(result.Data), result.Warnings, err
}
// DoGetFallback will attempt to do the request as-is, and on a 405 or 501 it

View File

@ -65,7 +65,6 @@ func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL {
}
func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) {
test := c.curTest
if req.URL.Path != test.reqPath {
@ -101,7 +100,6 @@ func (c *apiTestClient) DoGetFallback(ctx context.Context, u *url.URL, args url.
}
func TestAPIs(t *testing.T) {
testTime := time.Now()
tc := &apiTestClient{
@ -131,7 +129,7 @@ func TestAPIs(t *testing.T) {
}
}
doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) {
doDeleteSeries := func(matcher string, startTime, endTime time.Time) func() (interface{}, Warnings, error) {
return func() (interface{}, Warnings, error) {
return nil, nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime)
}
@ -182,7 +180,7 @@ func TestAPIs(t *testing.T) {
}
}
doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) {
doSeries := func(matcher string, startTime, endTime time.Time) func() (interface{}, Warnings, error) {
return func() (interface{}, Warnings, error) {
return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime)
}
@ -209,14 +207,14 @@ func TestAPIs(t *testing.T) {
}
}
doTargetsMetadata := func(matchTarget string, metric string, limit string) func() (interface{}, Warnings, error) {
doTargetsMetadata := func(matchTarget, metric, limit string) func() (interface{}, Warnings, error) {
return func() (interface{}, Warnings, error) {
v, err := promAPI.TargetsMetadata(context.Background(), matchTarget, metric, limit)
return v, nil, err
}
}
doMetadata := func(metric string, limit string) func() (interface{}, Warnings, error) {
doMetadata := func(metric, limit string) func() (interface{}, Warnings, error) {
return func() (interface{}, Warnings, error) {
v, err := promAPI.Metadata(context.Background(), metric, limit)
return v, nil, err
@ -237,7 +235,7 @@ func TestAPIs(t *testing.T) {
}
}
doQueryExemplars := func(query string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) {
doQueryExemplars := func(query string, startTime, endTime time.Time) func() (interface{}, Warnings, error) {
return func() (interface{}, Warnings, error) {
v, err := promAPI.QueryExemplars(context.Background(), query, startTime, endTime)
return v, nil, err
@ -471,7 +469,8 @@ func TestAPIs(t *testing.T) {
{
"__name__": "up",
"job": "prometheus",
"instance": "localhost:9090"},
"instance": "localhost:9090",
},
},
reqMethod: "GET",
reqPath: "/api/v1/series",
@ -495,7 +494,8 @@ func TestAPIs(t *testing.T) {
{
"__name__": "up",
"job": "prometheus",
"instance": "localhost:9090"},
"instance": "localhost:9090",
},
},
inWarnings: []string{"a"},
reqMethod: "GET",
@ -586,7 +586,8 @@ func TestAPIs(t *testing.T) {
{
"__name__": "up",
"job": "prometheus",
"instance": "localhost:9090"},
"instance": "localhost:9090",
},
},
reqMethod: "POST",
reqPath: "/api/v1/admin/tsdb/delete_series",
@ -1115,7 +1116,7 @@ func TestAPIs(t *testing.T) {
"limit": []string{"1"},
},
res: map[string][]Metadata{
"go_goroutines": []Metadata{
"go_goroutines": {
{
Type: "gauge",
Help: "Number of goroutines that currently exist.",
@ -1523,7 +1524,6 @@ func TestAPIClientDo(t *testing.T) {
for i, test := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
tc.ch <- test
_, body, warnings, err := client.Do(context.Background(), tc.req)
@ -1564,7 +1564,6 @@ func TestAPIClientDo(t *testing.T) {
t.Fatalf("expected body :%v, but got:%v", test.expectedBody, string(body))
}
})
}
}

View File

@ -22,9 +22,10 @@ import (
"os"
"time"
"github.com/prometheus/common/config"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/config"
)
func ExampleAPI_query() {

View File

@ -30,7 +30,6 @@ func (c collectorDescribedByCollect) Describe(ch chan<- *Desc) {
}
func TestDescribeByCollect(t *testing.T) {
goodCollector := collectorDescribedByCollect{
cnt: NewCounter(CounterOpts{Name: "c1", Help: "help c1"}),
gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}),

View File

@ -72,7 +72,8 @@ const (
//
// The current default is GoRuntimeMemStatsCollection, so the compatibility mode with
// client_golang pre v1.12 (move to runtime/metrics).
func WithGoCollections(flags GoCollectionOption) goOption {
//nolint:staticcheck // Ignore SA1019 until v2.
func WithGoCollections(flags GoCollectionOption) func(options *prometheus.GoCollectorOptions) {
return func(o *goOptions) {
o.EnabledCollections = uint32(flags)
}

View File

@ -231,7 +231,7 @@ func TestCounterExemplar(t *testing.T) {
}
expectedExemplar := &dto.Exemplar{
Label: []*dto.LabelPair{
&dto.LabelPair{Name: proto.String("foo"), Value: proto.String("bar")},
{Name: proto.String("foo"), Value: proto.String("bar")},
},
Value: proto.Float64(42),
Timestamp: ts,

View File

@ -20,6 +20,7 @@ import (
"strings"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/client_golang/prometheus/internal"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.

View File

@ -108,7 +108,6 @@ func (v *InfoVec) MustCurryWith(labels prometheus.Labels) *InfoVec {
}
func ExampleMetricVec() {
infoVec := NewInfoVec(
"library_version_info",
"Versions of the libraries used in this binary.",

View File

@ -19,27 +19,25 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var (
// apiRequestDuration tracks the duration separate for each HTTP status
// class (1xx, 2xx, ...). This creates a fair amount of time series on
// the Prometheus server. Usually, you would track the duration of
// serving HTTP request without partitioning by outcome. Do something
// like this only if needed. Also note how only status classes are
// tracked, not every single status code. The latter would create an
// even larger amount of time series. Request counters partitioned by
// status code are usually OK as each counter only creates one time
// series. Histograms are way more expensive, so partition with care and
// only where you really need separate latency tracking. Partitioning by
// status class is only an example. In concrete cases, other partitions
// might make more sense.
apiRequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "api_request_duration_seconds",
Help: "Histogram for the request duration of the public API, partitioned by status class.",
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
},
[]string{"status_class"},
)
// apiRequestDuration tracks the duration separate for each HTTP status
// class (1xx, 2xx, ...). This creates a fair amount of time series on
// the Prometheus server. Usually, you would track the duration of
// serving HTTP request without partitioning by outcome. Do something
// like this only if needed. Also note how only status classes are
// tracked, not every single status code. The latter would create an
// even larger amount of time series. Request counters partitioned by
// status code are usually OK as each counter only creates one time
// series. Histograms are way more expensive, so partition with care and
// only where you really need separate latency tracking. Partitioning by
// status class is only an example. In concrete cases, other partitions
// might make more sense.
var apiRequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "api_request_duration_seconds",
Help: "Histogram for the request duration of the public API, partitioned by status class.",
Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
},
[]string{"status_class"},
)
func handler(w http.ResponseWriter, r *http.Request) {

View File

@ -19,17 +19,15 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var (
// If a function is called rarely (i.e. not more often than scrapes
// happen) or ideally only once (like in a batch job), it can make sense
// to use a Gauge for timing the function call. For timing a batch job
// and pushing the result to a Pushgateway, see also the comprehensive
// example in the push package.
funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "example_function_duration_seconds",
Help: "Duration of the last call of an example function.",
})
)
// If a function is called rarely (i.e. not more often than scrapes
// happen) or ideally only once (like in a batch job), it can make sense
// to use a Gauge for timing the function call. For timing a batch job
// and pushing the result to a Pushgateway, see also the comprehensive
// example in the push package.
var funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "example_function_duration_seconds",
Help: "Duration of the last call of an example function.",
})
func run() error {
// The Set method of the Gauge is used to observe the duration.

View File

@ -20,13 +20,11 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var (
requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "example_request_duration_seconds",
Help: "Histogram for the runtime of a simple example function.",
Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
})
)
var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "example_request_duration_seconds",
Help: "Histogram for the runtime of a simple example function.",
Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
})
func ExampleTimer() {
// timer times this example function. It uses a Histogram, but a Summary

View File

@ -54,7 +54,8 @@ const (
)
// runtime/metrics names required for runtimeMemStats like logic.
var rmForMemStats = []string{goGCHeapTinyAllocsObjects,
var rmForMemStats = []string{
goGCHeapTinyAllocsObjects,
goGCHeapAllocsObjects,
goGCHeapFreesObjects,
goGCHeapAllocsBytes,

View File

@ -240,9 +240,8 @@ func writeMetric(buf *bufio.Writer, m model.Metric, useTags bool) error {
}
if useTags {
return writeTags(buf, m)
} else {
return writeLabels(buf, m, numLabels)
}
return writeLabels(buf, m, numLabels)
}
return nil
}

View File

@ -354,7 +354,8 @@ func TestBuckets(t *testing.T) {
}
got = ExponentialBucketsRange(1, 100, 10)
want = []float64{1.0, 1.6681005372000588, 2.782559402207125,
want = []float64{
1.0, 1.6681005372000588, 2.782559402207125,
4.641588833612779, 7.742636826811273, 12.915496650148842,
21.544346900318846, 35.93813663804629, 59.94842503189414,
100.00000000000007,

View File

@ -106,8 +106,8 @@ func NewMatcher(a, b []string) *SequenceMatcher {
}
func NewMatcherWithJunk(a, b []string, autoJunk bool,
isJunk func(string) bool) *SequenceMatcher {
isJunk func(string) bool,
) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b)
return &m
@ -163,12 +163,12 @@ func (m *SequenceMatcher) chainB() {
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s, _ := range b2j {
for s := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s, _ := range junk {
for s := range junk {
delete(b2j, s)
}
}
@ -183,7 +183,7 @@ func (m *SequenceMatcher) chainB() {
popular[s] = struct{}{}
}
}
for s, _ := range popular {
for s := range popular {
delete(b2j, s)
}
}
@ -270,7 +270,7 @@ func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
bestsize++
}
// Now that we have a wholly interesting match (albeit possibly
@ -287,7 +287,7 @@ func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
bestsize++
}
return Match{A: besti, B: bestj, Size: bestsize}
@ -439,8 +439,10 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
// End the current group and start a new one whenever
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n)})
group = append(group, OpCode{
c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n),
})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
@ -498,7 +500,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
}
avail[s] = n - 1
if n > 0 {
matches += 1
matches++
}
}
return calculateRatio(matches, len(m.a)+len(m.b))
@ -522,7 +524,7 @@ func formatRangeUnified(start, stop int) string {
return fmt.Sprintf("%d", beginning)
}
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
beginning-- // empty ranges begin at line just before the range
}
return fmt.Sprintf("%d,%d", beginning, length)
}
@ -637,7 +639,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err
return w.String(), err
}
// Split a string on "\n" while preserving them. The output can be used

View File

@ -58,7 +58,7 @@ func TestGetOptCodes(t *testing.T) {
fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag),
op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2])
}
result := string(w.Bytes())
result := w.String()
expected := `d a[0:1], (q) b[0:0] ()
e a[1:3], (ab) b[0:2] (ab)
r a[3:4], (x) b[2:3] (y)
@ -93,7 +93,7 @@ func TestGroupedOpCodes(t *testing.T) {
op.I1, op.I2, op.J1, op.J2)
}
}
result := string(w.Bytes())
result := w.String()
expected := `group
e, 5, 8, 5, 8
i, 8, 8, 8, 9
@ -185,14 +185,14 @@ func TestWithAsciiBJunk(t *testing.T) {
sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}})
assertEqual(t, sm.bJunk, map[string]struct{}{" ": {}})
isJunk = func(s string) bool {
return s == " " || s == "b"
}
sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}})
assertEqual(t, sm.bJunk, map[string]struct{}{" ": {}, "b": {}})
}
func TestSFBugsRatioForNullSeqn(t *testing.T) {

View File

@ -75,5 +75,4 @@ func TestWithExemplarsMetric(t *testing.T) {
}
}
})
}

View File

@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator }
type (
closeNotifierDelegator struct{ *responseWriterDelegator }
flusherDelegator struct{ *responseWriterDelegator }
hijackerDelegator struct{ *responseWriterDelegator }
readerFromDelegator struct{ *responseWriterDelegator }
pusherDelegator struct{ *responseWriterDelegator }
)
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() {
}
d.ResponseWriter.(http.Flusher).Flush()
}
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
d.written += n
return n, err
}
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
@ -261,7 +267,7 @@ func init() {
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher

View File

@ -24,8 +24,9 @@ import (
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
type errorCollector struct{}

View File

@ -246,7 +246,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code bool, method bool) {
func checkLabels(c prometheus.Collector) (code, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (

View File

@ -279,7 +279,7 @@ func TestLabels(t *testing.T) {
ok: false,
},
}
checkLabels := func(labels []string) (gotCode bool, gotMethod bool) {
checkLabels := func(labels []string) (gotCode, gotMethod bool) {
for _, label := range labels {
switch label {
case "code":

View File

@ -50,11 +50,10 @@ func ExampleInstrumentHandlerWithExtraMethods() {
// Instrument the handlers with all the metrics, injecting the "handler"
// label by currying.
pullChain :=
InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}),
InstrumentHandlerCounter(counter, pullHandler, opts),
opts,
)
pullChain := InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}),
InstrumentHandlerCounter(counter, pullHandler, opts),
opts,
)
http.Handle("/metrics", Handler())
http.Handle("/pull", pullChain)

View File

@ -98,9 +98,7 @@ func New(url, job string) *Pusher {
if !strings.Contains(url, "://") {
url = "http://" + url
}
if strings.HasSuffix(url, "/") {
url = url[:len(url)-1]
}
url = strings.TrimSuffix(url, "/")
return &Pusher{
error: err,
@ -273,7 +271,11 @@ func (p *Pusher) push(ctx context.Context, method string) error {
}
}
}
enc.Encode(mf)
if err := enc.Encode(mf); err != nil {
return fmt.Errorf(
"failed to encode metric familty %s, error is %w",
mf.GetName(), err)
}
}
req, err := http.NewRequestWithContext(ctx, method, p.fullURL(), buf)
if err != nil {

View File

@ -26,7 +26,6 @@ import (
)
func TestPush(t *testing.T) {
var (
lastMethod string
lastBody []byte

View File

@ -582,7 +582,7 @@ func WriteToTextfile(filename string, g Gatherer) error {
return err
}
if err := os.Chmod(tmp.Name(), 0644); err != nil {
if err := os.Chmod(tmp.Name(), 0o644); err != nil {
return err
}
return os.Rename(tmp.Name(), filename)

View File

@ -349,7 +349,7 @@ collected metric "broken_metric" { label:<name:"foo" value:"bar" > label:<name:"
body []byte
}
var scenarios = []struct {
scenarios := []struct {
headers map[string]string
out output
collector prometheus.Collector
@ -1118,7 +1118,6 @@ func (m *collidingCollector) Collect(metric chan<- prometheus.Metric) {
// TestAlreadyRegistered will fail with the old, weaker hash function. It is
// taken from https://play.golang.org/p/HpV7YE6LI_4 , authored by @awilliams.
func TestAlreadyRegisteredCollision(t *testing.T) {
reg := prometheus.NewRegistry()
for i := 0; i < 10000; i++ {

View File

@ -283,7 +283,7 @@ func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
// metricUnits attempts to detect known unit types used as part of a metric name,
// e.g. "foo_bytes_total" or "bar_baz_milligrams".
func metricUnits(m string) (unit string, base string, ok bool) {
func metricUnits(m string) (unit, base string, ok bool) {
ss := strings.Split(m, "_")
for unit, base := range units {

View File

@ -330,7 +330,8 @@ thermometers_rankine 10
Metric: "thermometers_rankine",
Text: `use base unit "celsius" instead of "rankine"`,
}},
}, {
},
{
name: "inches",
in: `
# HELP x_inches Test metric.
@ -341,7 +342,8 @@ x_inches 10
Metric: "x_inches",
Text: `use base unit "meters" instead of "inches"`,
}},
}, {
},
{
name: "yards",
in: `
# HELP x_yards Test metric.
@ -352,7 +354,8 @@ x_yards 10
Metric: "x_yards",
Text: `use base unit "meters" instead of "yards"`,
}},
}, {
},
{
name: "miles",
in: `
# HELP x_miles Test metric.
@ -363,7 +366,8 @@ x_miles 10
Metric: "x_miles",
Text: `use base unit "meters" instead of "miles"`,
}},
}, {
},
{
name: "bits",
in: `
# HELP x_bits Test metric.

View File

@ -101,7 +101,9 @@ func ToFloat64(c prometheus.Collector) float64 {
}
pb := &dto.Metric{}
m.Write(pb)
if err := m.Write(pb); err != nil {
panic(fmt.Errorf("error happened while collecting metrics: %w", err))
}
if pb.Gauge != nil {
return pb.Gauge.GetValue()
}
@ -221,7 +223,7 @@ func compare(got, want []*dto.MetricFamily) error {
// diff returns a diff of both values as long as both are of the same type and
// are a struct, map, slice, array or string. Otherwise it returns an empty string.
func diff(expected interface{}, actual interface{}) string {
func diff(expected, actual interface{}) string {
if expected == nil || actual == nil {
return ""
}

View File

@ -148,5 +148,4 @@ func TestTimerByOutcome(t *testing.T) {
if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
}
}

View File

@ -21,9 +21,10 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
)

View File

@ -571,7 +571,7 @@ func findMetricWithLabels(
return len(metrics)
}
func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
if len(values) != len(lvs)+len(curry) {
return false
}

View File

@ -49,7 +49,7 @@ func testDelete(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
@ -57,7 +57,7 @@ func testDelete(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
@ -65,7 +65,7 @@ func testDelete(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
t.Errorf("got %v, want %v", got, want)
}
@ -103,8 +103,8 @@ func testDeleteLabelValues(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision.
vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v3"}).Set(42) // Add junk data for collision.
if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
t.Errorf("got %v, want %v", got, want)
}
@ -115,7 +115,7 @@ func testDeleteLabelValues(t *testing.T, vec *GaugeVec) {
t.Errorf("got %v, want %v", got, want)
}
vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42)
vec.With(Labels{"l1": "v1", "l2": "v2"}).Set(42)
// Delete out of order.
if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
t.Errorf("got %v, want %v", got, want)
@ -146,9 +146,9 @@ func TestDeletePartialMatch(t *testing.T) {
}
baseVec.With(Labels{"l1": "baseValue1", "l2": "baseValue2", "l3": "baseValue3"}).Inc()
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff1BaseValue2", "l3": "v3"}).(Gauge).Set(42)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff2BaseValue2", "l3": "v3"}).(Gauge).Set(84)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff3BaseValue2", "l3": "v3"}).(Gauge).Set(168)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff1BaseValue2", "l3": "v3"}).Set(42)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff2BaseValue2", "l3": "v3"}).Set(84)
baseVec.With(Labels{"l1": "multiDeleteV1", "l2": "diff3BaseValue2", "l3": "v3"}).Set(168)
curriedVec := baseVec.MustCurryWith(Labels{"l2": "curriedValue2"})
curriedVec.WithLabelValues("curriedValue1", "curriedValue3").Inc()
@ -249,7 +249,7 @@ func testMetricVec(t *testing.T, vec *GaugeVec) {
vec.WithLabelValues(pair[0], pair[1]).Inc()
expected[[2]string{"v1", "v2"}]++
vec.WithLabelValues("v1", "v2").(Gauge).Inc()
vec.WithLabelValues("v1", "v2").Inc()
}
var total int
@ -351,7 +351,6 @@ func TestCurryVecWithCollisions(t *testing.T) {
}
func testCurryVec(t *testing.T, vec *CounterVec) {
assertMetrics := func(t *testing.T) {
n := 0
for _, m := range vec.metricMap.metrics {
@ -538,7 +537,6 @@ func testCurryVec(t *testing.T, vec *CounterVec) {
} else if err.Error() != `label name "three" is already curried` {
t.Error("currying returned unexpected error:", err)
}
})
t.Run("unknown label", func(t *testing.T) {
if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil {

View File

@ -20,8 +20,9 @@ import (
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
)
// WrapRegistererWith returns a Registerer wrapping the provided

View File

@ -46,7 +46,6 @@ func toMetricFamilies(cs ...Collector) []*dto.MetricFamily {
}
func TestWrap(t *testing.T) {
simpleCnt := NewCounter(CounterOpts{
Name: "simpleCnt",
Help: "helpSimpleCnt",
@ -319,7 +318,6 @@ func TestWrap(t *testing.T) {
}
})
}
}
func TestNil(t *testing.T) {

View File

@ -0,0 +1,5 @@
// The following 2 methods always return nil as the error
(*github.com/cespare/xxhash/v2.Digest).Write
(*github.com/cespare/xxhash/v2.Digest).WriteString

(*bufio.Writer).WriteRune