gocollector: Added options to Go Collector for changing the (#1031)

* Renamed files.

Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>

* gocollector: Added options to Go Collector for different collections.

Fixes https://github.com/prometheus/client_golang/issues/983

Also:

* Fixed TestMemStatsEquivalence; it was a no-op before (:
* Removed the gc_cpu_fraction metric completely, since it does not work correctly for Go 1.17+.

Signed-off-by: Bartlomiej Plotka <bwplotka@gmail.com>
Bartlomiej Plotka 2022-04-13 10:55:22 +02:00 committed by GitHub
parent cc7991d977
commit 24172847e3
7 changed files with 398 additions and 161 deletions


@@ -14,3 +14,27 @@
// Package collectors provides implementations of prometheus.Collector to
// conveniently collect process and Go-related metrics.
package collectors
import "github.com/prometheus/client_golang/prometheus"
// NewBuildInfoCollector returns a collector collecting a single metric
// "go_build_info" with the constant value 1 and three labels "path", "version",
// and "checksum". Their label values contain the main module path, version, and
// checksum, respectively. The labels will only have meaningful values if the
// binary is built with Go module support and from source code retrieved from
// the source repository (rather than the local file system). This is usually
// accomplished by building from outside of GOPATH, specifying the full address
// of the main package, e.g. "GO111MODULE=on go run
// github.com/prometheus/client_golang/examples/random". If built without Go
// module support, all label values will be "unknown". If built with Go module
// support but using the source code from the local file system, the "path" will
// be set appropriately, but "checksum" will be empty and "version" will be
// "(devel)".
//
// This collector uses only the build information for the main module. See
// https://github.com/povilasv/prommod for an example of a collector for the
// module dependencies.
func NewBuildInfoCollector() prometheus.Collector {
//nolint:staticcheck // Ignore SA1019 until v2.
return prometheus.NewBuildInfoCollector()
}
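
A minimal usage sketch (not part of the diff) for the collector above, assuming the standard promhttp handler; the registry setup and listen address are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry keeps the example independent of the global default.
	reg := prometheus.NewRegistry()

	// Exposes go_build_info{path="...",version="...",checksum="..."} 1.
	reg.MustRegister(collectors.NewBuildInfoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}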


@@ -11,6 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.17
// +build !go1.17
package collectors
import "github.com/prometheus/client_golang/prometheus"
@@ -42,28 +45,5 @@ import "github.com/prometheus/client_golang/prometheus"
// NOTE: The problem is solved in Go 1.15, see
// https://github.com/golang/go/issues/19812 for the related Go issue.
func NewGoCollector() prometheus.Collector {
//nolint:staticcheck // Ignore SA1019 until v2.
return prometheus.NewGoCollector()
}
// NewBuildInfoCollector returns a collector collecting a single metric
// "go_build_info" with the constant value 1 and three labels "path", "version",
// and "checksum". Their label values contain the main module path, version, and
// checksum, respectively. The labels will only have meaningful values if the
// binary is built with Go module support and from source code retrieved from
// the source repository (rather than the local file system). This is usually
// accomplished by building from outside of GOPATH, specifying the full address
// of the main package, e.g. "GO111MODULE=on go run
// github.com/prometheus/client_golang/examples/random". If built without Go
// module support, all label values will be "unknown". If built with Go module
// support but using the source code from the local file system, the "path" will
// be set appropriately, but "checksum" will be empty and "version" will be
// "(devel)".
//
// This collector uses only the build information for the main module. See
// https://github.com/povilasv/prommod for an example of a collector for the
// module dependencies.
func NewBuildInfoCollector() prometheus.Collector {
//nolint:staticcheck // Ignore SA1019 until v2.
return prometheus.NewBuildInfoCollector()
}


@@ -0,0 +1,91 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package collectors
import "github.com/prometheus/client_golang/prometheus"
//nolint:staticcheck // Ignore SA1019 until v2.
type goOptions = prometheus.GoCollectorOptions
type goOption func(o *goOptions)
type GoCollectionOption uint32
const (
// GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure such as
// go_memstats_alloc_bytes
// go_memstats_alloc_bytes_total
// go_memstats_sys_bytes
// go_memstats_lookups_total
// go_memstats_mallocs_total
// go_memstats_frees_total
// go_memstats_heap_alloc_bytes
// go_memstats_heap_sys_bytes
// go_memstats_heap_idle_bytes
// go_memstats_heap_inuse_bytes
// go_memstats_heap_released_bytes
// go_memstats_heap_objects
// go_memstats_stack_inuse_bytes
// go_memstats_stack_sys_bytes
// go_memstats_mspan_inuse_bytes
// go_memstats_mspan_sys_bytes
// go_memstats_mcache_inuse_bytes
// go_memstats_mcache_sys_bytes
// go_memstats_buck_hash_sys_bytes
// go_memstats_gc_sys_bytes
// go_memstats_other_sys_bytes
// go_memstats_next_gc_bytes
// i.e. the metrics known from before client_golang v1.12.0, except the skipped go_memstats_gc_cpu_fraction (see
// https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for an explanation).
//
// NOTE that this mode represents runtime.MemStats statistics, but they are
// actually implemented using the new runtime/metrics package.
// Deprecated: Use GoRuntimeMetricsCollection instead going forward.
GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota
// GoRuntimeMetricsCollection is the new set of metrics represented by the runtime/metrics package, following
// consistent naming. The exposed metric set depends on the Go version, but it is controlled against
// unexpected cardinality. This set overlaps with GoRuntimeMemStatsCollection, just with
// new names. GoRuntimeMetricsCollection is what is recommended going forward.
GoRuntimeMetricsCollection
)
// WithGoCollections allows enabling different collections for the Go collector on top of the base metrics
// like go_goroutines, go_threads, go_gc_duration_seconds, go_memstats_last_gc_time_seconds and go_info.
//
// Check GoRuntimeMemStatsCollection and GoRuntimeMetricsCollection for more details. You can use none,
// one or more collections at once. For example:
// WithGoCollections(GoRuntimeMemStatsCollection | GoRuntimeMetricsCollection) means that both the
// GoRuntimeMemStatsCollection metrics and the GoRuntimeMetricsCollection metrics will be exposed.
//
// Use WithGoCollections(GoRuntimeMemStatsCollection) to have the Go collector work in
// compatibility mode with client_golang pre v1.12 (before the move to runtime/metrics).
func WithGoCollections(flags uint32) goOption {
return func(o *goOptions) {
o.EnabledCollections = flags
}
}
// NewGoCollector returns a collector that exports metrics about the current Go
// process, based on debug.GCStats and the runtime/metrics package.
func NewGoCollector(opts ...goOption) prometheus.Collector {
//nolint:staticcheck // Ignore SA1019 until v2.
promPkgOpts := make([]func(o *prometheus.GoCollectorOptions), len(opts))
for i, opt := range opts {
promPkgOpts[i] = opt
}
//nolint:staticcheck // Ignore SA1019 until v2.
return prometheus.NewGoCollector(promPkgOpts...)
}
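
As a usage sketch against the API introduced above (not part of the diff): the collection constants are typed (GoCollectionOption), so they are converted to the uint32 that WithGoCollections accepts in this version; the registry setup is illustrative.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()

	// Recommended going forward: only the runtime/metrics based collection.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollections(uint32(collectors.GoRuntimeMetricsCollection)),
	))

	// Compatibility mode, exposing both the pre-v1.12 memstats names and the
	// new runtime/metrics names, would instead OR the flags:
	//   collectors.WithGoCollections(uint32(
	//       collectors.GoRuntimeMemStatsCollection | collectors.GoRuntimeMetricsCollection))
}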


@@ -197,14 +197,6 @@ func goRuntimeMemStats() memStatsMetrics {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
},
}
}
@@ -268,7 +260,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) {
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
}
@@ -278,6 +269,7 @@ func memstatNamespace(s string) string {
// memStatsMetrics provide description, evaluator, runtime/metrics name, and
// value type for memstat metrics.
// TODO(bwplotka): Remove once Go 1.16 reaches EOL and replace with runtime/metrics.Description.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64


@@ -40,13 +40,28 @@ type goCollector struct {
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
msMetrics := goRuntimeMemStats()
msMetrics = append(msMetrics, struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}{
// This metric is omitted in Go 1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
})
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: goRuntimeMemStats(),
msMetrics: msMetrics,
}
}


@@ -29,7 +29,66 @@ import (
dto "github.com/prometheus/client_model/go"
)
const (
goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
goGCHeapFreesObjects = "/gc/heap/frees:objects"
goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
goGCHeapObjects = "/gc/heap/objects:objects"
goGCHeapGoalBytes = "/gc/heap/goal:bytes"
goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
)
// runtime/metrics names required for runtimeMemStats-like logic.
var rmForMemStats = []string{goGCHeapTinyAllocsObjects,
goGCHeapAllocsObjects,
goGCHeapFreesObjects,
goGCHeapAllocsBytes,
goGCHeapObjects,
goGCHeapGoalBytes,
goMemoryClassesTotalBytes,
goMemoryClassesHeapObjectsBytes,
goMemoryClassesHeapUnusedBytes,
goMemoryClassesHeapReleasedBytes,
goMemoryClassesHeapFreeBytes,
goMemoryClassesHeapStacksBytes,
goMemoryClassesOSStacksBytes,
goMemoryClassesMetadataMSpanInuseBytes,
goMemoryClassesMetadataMSPanFreeBytes,
goMemoryClassesMetadataMCacheInuseBytes,
goMemoryClassesMetadataMCacheFreeBytes,
goMemoryClassesProfilingBucketsBytes,
goMemoryClassesMetadataOtherBytes,
goMemoryClassesOtherBytes,
}
func bestEffortLookupRM(lookup []string) []metrics.Description {
ret := make([]metrics.Description, 0, len(lookup))
for _, rm := range metrics.All() {
for _, m := range lookup {
if m == rm.Name {
ret = append(ret, rm)
}
}
}
return ret
}
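
A short in-package sketch (not part of the diff) of how this best-effort lookup is consumed: build samples only for the descriptions present in the running Go version and read them in one call. The helper name readMemStatsSamples is hypothetical.

// readMemStatsSamples is a hypothetical helper illustrating the pattern used
// by NewGoCollector and by TestMemStatsEquivalence further below.
func readMemStatsSamples() []metrics.Sample {
	descs := bestEffortLookupRM(rmForMemStats)
	samples := make([]metrics.Sample, len(descs))
	for i := range descs {
		samples[i] = metrics.Sample{Name: descs[i].Name}
	}
	// Names missing from this Go version are simply absent, hence "best effort".
	metrics.Read(samples)
	return samples
}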
type goCollector struct {
opt GoCollectorOptions
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
@@ -51,12 +110,46 @@ type goCollector struct {
msMetrics memStatsMetrics
}
const (
// These are not exposed due to the need to move the Go collector to another package in v2.
// See issue https://github.com/prometheus/client_golang/issues/1030.
goRuntimeMemStatsCollection uint32 = 1 << iota
goRuntimeMetricsCollection
)
// GoCollectorOptions should not be used directly by anything except the `collectors` package.
// Use it via the collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// Deprecated: Use collectors.WithGoCollections instead.
type GoCollectorOptions struct {
// EnabledCollections sets which collections the collector should expose on top of the base collection.
// By default it is goRuntimeMemStatsCollection | goRuntimeMetricsCollection.
EnabledCollections uint32
}
func (c GoCollectorOptions) isEnabled(flag uint32) bool {
return c.EnabledCollections&flag != 0
}
const defaultGoCollections = goRuntimeMemStatsCollection | goRuntimeMetricsCollection
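
For illustration only (direct use of this struct is deprecated in favor of the collectors package), a sketch of how a functional option overrides the default bitmask and how isEnabled then gates the collections; the function and option literal are assumptions:

// exampleEnabledCollections is a hypothetical in-package sketch mirroring what
// NewGoCollector does with its opts parameter.
func exampleEnabledCollections() {
	opt := GoCollectorOptions{EnabledCollections: defaultGoCollections}
	for _, o := range []func(*GoCollectorOptions){
		func(o *GoCollectorOptions) { o.EnabledCollections = goRuntimeMetricsCollection },
	} {
		o(&opt)
	}
	// Now opt.isEnabled(goRuntimeMetricsCollection) is true and
	// opt.isEnabled(goRuntimeMemStatsCollection) is false.
}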
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
func NewGoCollector(opts ...func(o *GoCollectorOptions)) Collector {
opt := GoCollectorOptions{EnabledCollections: defaultGoCollections}
for _, o := range opts {
o(&opt)
}
var descriptions []metrics.Description
if opt.isEnabled(goRuntimeMetricsCollection) {
descriptions = metrics.All()
} else if opt.isEnabled(goRuntimeMemStatsCollection) {
descriptions = bestEffortLookupRM(rmForMemStats)
}
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
@@ -67,7 +160,11 @@ func NewGoCollector() Collector {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
if len(histograms) > 0 {
metrics.Read(histograms)
}
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
@@ -83,7 +180,7 @@ func NewGoCollector() Collector {
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
// to fail here. This condition is tested elsewhere.
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
@@ -123,12 +220,18 @@ func NewGoCollector() Collector {
}
metricSet = append(metricSet, m)
}
var msMetrics memStatsMetrics
if opt.isEnabled(goRuntimeMemStatsCollection) {
msMetrics = goRuntimeMemStats()
}
return &goCollector{
opt: opt,
base: newBaseGoCollector(),
rmSampleBuf: sampleBuf,
rmSampleMap: sampleMap,
rmMetrics: metricSet,
msMetrics: goRuntimeMemStats(),
msMetrics: msMetrics,
}
}
@@ -163,40 +266,47 @@ func (c *goCollector) Collect(ch chan<- Metric) {
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
if len(c.rmSampleBuf) > 0 {
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
}
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
if c.opt.isEnabled(goRuntimeMetricsCollection) {
// Collect all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
// populate the old metrics from it if goRuntimeMemStatsCollection is enabled.
if c.opt.isEnabled(goRuntimeMemStatsCollection) {
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
}
}
}
@@ -261,35 +371,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
ms.Sys = lookupOrZero("/memory/classes/total:bytes")
ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
ms.Lookups = 0 // Already always zero.
ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
ms.Alloc = ms.HeapAlloc
ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
// N.B. LastGC is omitted because runtime.GCStats already has this.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.LastGC = 0
ms.HeapObjects = lookupOrZero(goGCHeapObjects)
ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime


@@ -28,78 +28,96 @@ import (
dto "github.com/prometheus/client_model/go"
)
func TestGoCollectorRuntimeMetrics(t *testing.T) {
metrics := collectGoMetrics(t)
msChecklist := make(map[string]bool)
for _, m := range goRuntimeMemStats() {
msChecklist[m.desc.fqName] = false
}
if len(metrics) == 0 {
t.Fatal("no metrics created by Collect")
}
// Check a few specific metrics.
//
// Checking them all is somewhat pointless because the runtime/metrics
// metrics are going to shift underneath us. Also if we try to check
// against the runtime/metrics package in an automated fashion we're kind
// of missing the point, because we have to do all the same work the code
// has to do to perform the translation. Same for supporting old metric
// names (the best we can do here is make sure they're all accounted for).
var sysBytes, allocs float64
for _, m := range metrics {
name := m.Desc().fqName
switch name {
case "go_memory_classes_total_bytes":
checkMemoryMetric(t, m, &sysBytes)
case "go_sys_bytes":
checkMemoryMetric(t, m, &sysBytes)
case "go_gc_heap_allocs_bytes_total":
checkMemoryMetric(t, m, &allocs)
case "go_alloc_bytes_total":
checkMemoryMetric(t, m, &allocs)
}
if present, ok := msChecklist[name]; ok {
if present {
t.Errorf("memstats metric %s found more than once", name)
}
msChecklist[name] = true
}
}
for name := range msChecklist {
if present := msChecklist[name]; !present {
t.Errorf("memstats metric %s not collected", name)
}
func TestRmForMemStats(t *testing.T) {
if got, want := len(bestEffortLookupRM(rmForMemStats)), len(rmForMemStats); got != want {
t.Errorf("got %d, want %d metrics", got, want)
}
}
func checkMemoryMetric(t *testing.T, m Metric, expValue *float64) {
t.Helper()
func expectedBaseMetrics() map[string]struct{} {
metrics := map[string]struct{}{}
b := newBaseGoCollector()
for _, m := range []string{
b.gcDesc.fqName,
b.goInfoDesc.fqName,
b.goroutinesDesc.fqName,
b.gcLastTimeDesc.fqName,
b.threadsDesc.fqName,
} {
metrics[m] = struct{}{}
}
return metrics
}
pb := &dto.Metric{}
m.Write(pb)
var value float64
if g := pb.GetGauge(); g != nil {
value = g.GetValue()
} else {
value = pb.GetCounter().GetValue()
func addExpectedRuntimeMemStats(metrics map[string]struct{}) map[string]struct{} {
for _, m := range goRuntimeMemStats() {
metrics[m.desc.fqName] = struct{}{}
}
if value <= 0 {
t.Error("bad value for total memory")
return metrics
}
func addExpectedRuntimeMetrics(metrics map[string]struct{}) map[string]struct{} {
for _, m := range expectedRuntimeMetrics {
metrics[m] = struct{}{}
}
if *expValue == 0 {
*expValue = value
} else if value != *expValue {
t.Errorf("legacy metric and runtime/metrics metric do not match: want %d, got %d", int64(*expValue), int64(value))
return metrics
}
func TestGoCollector(t *testing.T) {
for _, tcase := range []struct {
collections uint32
expectedFQNameSet map[string]struct{}
}{
{
collections: 0,
expectedFQNameSet: expectedBaseMetrics(),
},
{
collections: goRuntimeMemStatsCollection,
expectedFQNameSet: addExpectedRuntimeMemStats(expectedBaseMetrics()),
},
{
collections: goRuntimeMetricsCollection,
expectedFQNameSet: addExpectedRuntimeMetrics(expectedBaseMetrics()),
},
{
collections: goRuntimeMemStatsCollection | goRuntimeMetricsCollection,
expectedFQNameSet: addExpectedRuntimeMemStats(addExpectedRuntimeMetrics(expectedBaseMetrics())),
},
} {
if ok := t.Run("", func(t *testing.T) {
goMetrics := collectGoMetrics(t, tcase.collections)
goMetricSet := make(map[string]Metric)
for _, m := range goMetrics {
goMetricSet[m.Desc().fqName] = m
}
for i := range goMetrics {
name := goMetrics[i].Desc().fqName
if _, ok := tcase.expectedFQNameSet[name]; !ok {
t.Errorf("found unexpected metric %s", name)
continue
}
}
// Now iterate over the expected metrics and look for removals.
for expectedName := range tcase.expectedFQNameSet {
if _, ok := goMetricSet[expectedName]; !ok {
t.Errorf("missing expected metric %s in collection", expectedName)
continue
}
}
}); !ok {
return
}
}
}
var sink interface{}
func TestBatchHistogram(t *testing.T) {
goMetrics := collectGoMetrics(t)
goMetrics := collectGoMetrics(t, defaultGoCollections)
var mhist Metric
for _, m := range goMetrics {
@@ -126,7 +144,7 @@ func TestBatchHistogram(t *testing.T) {
for i := 0; i < 100; i++ {
sink = make([]byte, 128)
}
collectGoMetrics(t)
collectGoMetrics(t, defaultGoCollections)
for i, v := range hist.counts {
if v != countsCopy[i] {
t.Error("counts changed during new collection")
@@ -175,10 +193,12 @@ func TestBatchHistogram(t *testing.T) {
}
}
func collectGoMetrics(t *testing.T) []Metric {
func collectGoMetrics(t *testing.T, enabledCollections uint32) []Metric {
t.Helper()
c := NewGoCollector().(*goCollector)
c := NewGoCollector(func(o *GoCollectorOptions) {
o.EnabledCollections = enabledCollections
}).(*goCollector)
// Collect all metrics.
ch := make(chan Metric)
@@ -201,7 +221,8 @@ func collectGoMetrics(t *testing.T) []Metric {
func TestMemStatsEquivalence(t *testing.T) {
var msReal, msFake runtime.MemStats
descs := metrics.All()
descs := bestEffortLookupRM(rmForMemStats)
samples := make([]metrics.Sample, len(descs))
samplesMap := make(map[string]*metrics.Sample)
for i := range descs {
@@ -214,9 +235,9 @@ func TestMemStatsEquivalence(t *testing.T) {
// Populate msReal.
runtime.ReadMemStats(&msReal)
// Populate msFake.
// Populate msFake and hope that no GC happened in between (:
metrics.Read(samples)
memStatsFromRM(&msFake, samplesMap)
// Iterate over them and make sure they're somewhat close.
@@ -227,9 +248,16 @@ func TestMemStatsEquivalence(t *testing.T) {
for i := 0; i < msRealValue.NumField(); i++ {
fr := msRealValue.Field(i)
ff := msFakeValue.Field(i)
switch typ.Kind() {
if typ.Field(i).Name == "PauseTotalNs" || typ.Field(i).Name == "LastGC" {
// We don't use those fields for metrics,
// so we are not interested in having them filled.
continue
}
switch fr.Kind() {
// The fields we are interested in are all uint64s.
// The only float64 field, GCCPUFraction, is omitted by design.
case reflect.Uint64:
// N.B. Almost all fields of MemStats are uint64s.
vr := fr.Interface().(uint64)
vf := ff.Interface().(uint64)
if float64(vr-vf)/float64(vf) > 0.05 {
@@ -240,7 +268,7 @@ func TestMemStatsEquivalence(t *testing.T) {
}
func TestExpectedRuntimeMetrics(t *testing.T) {
goMetrics := collectGoMetrics(t)
goMetrics := collectGoMetrics(t, goRuntimeMetricsCollection)
goMetricSet := make(map[string]Metric)
for _, m := range goMetrics {
goMetricSet[m.Desc().fqName] = m
@@ -253,6 +281,7 @@ func TestExpectedRuntimeMetrics(t *testing.T) {
rmName := descs[i].Name
rmSet[rmName] = struct{}{}
// expectedRuntimeMetrics depends on Go version.
expFQName, ok := expectedRuntimeMetrics[rmName]
if !ok {
t.Errorf("found new runtime/metrics metric %s", rmName)
@@ -268,6 +297,7 @@ func TestExpectedRuntimeMetrics(t *testing.T) {
continue
}
}
// Now iterate over the expected metrics and look for removals.
cardinality := 0
for rmName, fqName := range expectedRuntimeMetrics {