Merge pull request #78 from prometheus/beorn7/histogram

Histograms for the exposition library.
Björn Rabenstein 2015-02-19 16:16:15 +01:00
commit 38dbb2e268
13 changed files with 921 additions and 80 deletions
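
The PR adds a Histogram metric type to the exposition library: prometheus/histogram.go below introduces NewHistogram, NewHistogramVec, the bucket helpers LinearBuckets and ExponentialBuckets, and the reserved "le" bucket label, while the extraction and text packages learn to read and write histogram samples (synthesizing a "+Inf" bucket where it is missing). As a quick orientation (this sketch is not part of the commit; the metric name and label are invented for illustration), the new API is used roughly like this:

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		// Hypothetical metric: request latencies in seconds, in ten linear
		// buckets 0.05, 0.10, ..., 0.50 (an implicit +Inf bucket is added).
		reqDur := prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Name:    "http_request_duration_seconds",
				Help:    "HTTP request latencies in seconds.",
				Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
			},
			[]string{"handler"},
		)
		prometheus.MustRegister(reqDur)

		// One observation for the "search" handler.
		reqDur.WithLabelValues("search").Observe(0.42)
	}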

@@ -16,6 +16,7 @@ package extraction
 import (
 	"fmt"
 	"io"
+	"math"

 	dto "github.com/prometheus/client_model/go"
@@ -85,7 +86,10 @@ func extractCounter(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error
 			continue
 		}
-		sample := new(model.Sample)
+		sample := &model.Sample{
+			Metric: model.Metric{},
+			Value: model.SampleValue(m.Counter.GetValue()),
+		}
 		samples = append(samples, sample)

 		if m.TimestampMs != nil {
@@ -93,16 +97,12 @@ func extractCounter(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error
 		} else {
 			sample.Timestamp = o.Timestamp
 		}
-		sample.Metric = model.Metric{}
-		metric := sample.Metric
+		metric := sample.Metric
 		for _, p := range m.Label {
 			metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 		}
 		metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
-		sample.Value = model.SampleValue(m.Counter.GetValue())
 	}

 	return out.Ingest(samples)
@@ -116,7 +116,10 @@ func extractGauge(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
 			continue
 		}
-		sample := new(model.Sample)
+		sample := &model.Sample{
+			Metric: model.Metric{},
+			Value: model.SampleValue(m.Gauge.GetValue()),
+		}
 		samples = append(samples, sample)

 		if m.TimestampMs != nil {
@@ -124,16 +127,12 @@ func extractGauge(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
 		} else {
 			sample.Timestamp = o.Timestamp
 		}
-		sample.Metric = model.Metric{}
-		metric := sample.Metric
+		metric := sample.Metric
 		for _, p := range m.Label {
 			metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 		}
 		metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
-		sample.Value = model.SampleValue(m.Gauge.GetValue())
 	}

 	return out.Ingest(samples)
@@ -153,48 +152,50 @@ func extractSummary(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error
 		}

 		for _, q := range m.Summary.Quantile {
-			sample := new(model.Sample)
+			sample := &model.Sample{
+				Metric: model.Metric{},
+				Value: model.SampleValue(q.GetValue()),
+				Timestamp: timestamp,
+			}
 			samples = append(samples, sample)
-			sample.Timestamp = timestamp
-			sample.Metric = model.Metric{}
 			metric := sample.Metric
 			for _, p := range m.Label {
 				metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 			}
 			// BUG(matt): Update other names to "quantile".
-			metric[model.LabelName("quantile")] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+			metric[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
 			metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
-			sample.Value = model.SampleValue(q.GetValue())
 		}

 		if m.Summary.SampleSum != nil {
-			sum := new(model.Sample)
-			sum.Timestamp = timestamp
-			metric := model.Metric{}
+			sum := &model.Sample{
+				Metric: model.Metric{},
+				Value: model.SampleValue(m.Summary.GetSampleSum()),
+				Timestamp: timestamp,
+			}
+			samples = append(samples, sum)
+			metric := sum.Metric
 			for _, p := range m.Label {
 				metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 			}
 			metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-			sum.Metric = metric
-			sum.Value = model.SampleValue(m.Summary.GetSampleSum())
-			samples = append(samples, sum)
 		}

 		if m.Summary.SampleCount != nil {
-			count := new(model.Sample)
-			count.Timestamp = timestamp
-			metric := model.Metric{}
+			count := &model.Sample{
+				Metric: model.Metric{},
+				Value: model.SampleValue(m.Summary.GetSampleCount()),
+				Timestamp: timestamp,
+			}
+			samples = append(samples, count)
+			metric := count.Metric
 			for _, p := range m.Label {
 				metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 			}
 			metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-			count.Metric = metric
-			count.Value = model.SampleValue(m.Summary.GetSampleCount())
-			samples = append(samples, count)
 		}
 	}
@@ -209,7 +210,10 @@ func extractUntyped(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error
 			continue
 		}
-		sample := new(model.Sample)
+		sample := &model.Sample{
+			Metric: model.Metric{},
+			Value: model.SampleValue(m.Untyped.GetValue()),
+		}
 		samples = append(samples, sample)

 		if m.TimestampMs != nil {
@@ -217,16 +221,12 @@ func extractUntyped(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error
 		} else {
 			sample.Timestamp = o.Timestamp
 		}
-		sample.Metric = model.Metric{}
-		metric := sample.Metric
+		metric := sample.Metric
 		for _, p := range m.Label {
 			metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 		}
 		metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
-		sample.Value = model.SampleValue(m.Untyped.GetValue())
 	}

 	return out.Ingest(samples)
@@ -245,49 +245,72 @@ func extractHistogram(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error
 			timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000)
 		}

+		infSeen := false
 		for _, q := range m.Histogram.Bucket {
-			sample := new(model.Sample)
+			sample := &model.Sample{
+				Metric: model.Metric{},
+				Value: model.SampleValue(q.GetCumulativeCount()),
+				Timestamp: timestamp,
+			}
 			samples = append(samples, sample)
-			sample.Timestamp = timestamp
-			sample.Metric = model.Metric{}
 			metric := sample.Metric
 			for _, p := range m.Label {
 				metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 			}
-			metric[model.LabelName("le")] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+			metric[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
 			metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
-			sample.Value = model.SampleValue(q.GetCumulativeCount())
+			if math.IsInf(q.GetUpperBound(), +1) {
+				infSeen = true
+			}
 		}
-		// TODO: If +Inf bucket is missing, add it.

 		if m.Histogram.SampleSum != nil {
-			sum := new(model.Sample)
-			sum.Timestamp = timestamp
-			metric := model.Metric{}
+			sum := &model.Sample{
+				Metric: model.Metric{},
+				Value: model.SampleValue(m.Histogram.GetSampleSum()),
+				Timestamp: timestamp,
+			}
+			samples = append(samples, sum)
+			metric := sum.Metric
 			for _, p := range m.Label {
 				metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 			}
 			metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
-			sum.Metric = metric
-			sum.Value = model.SampleValue(m.Histogram.GetSampleSum())
-			samples = append(samples, sum)
 		}

 		if m.Histogram.SampleCount != nil {
-			count := new(model.Sample)
-			count.Timestamp = timestamp
-			metric := model.Metric{}
+			count := &model.Sample{
+				Metric: model.Metric{},
+				Value: model.SampleValue(m.Histogram.GetSampleCount()),
+				Timestamp: timestamp,
+			}
+			samples = append(samples, count)
+			metric := count.Metric
 			for _, p := range m.Label {
 				metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
 			}
 			metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
-			count.Metric = metric
-			count.Value = model.SampleValue(m.Histogram.GetSampleCount())
-			samples = append(samples, count)
+
+			if !infSeen {
+				infBucket := &model.Sample{
+					Metric: model.Metric{},
+					Value: count.Value,
+					Timestamp: timestamp,
+				}
+				samples = append(samples, infBucket)
+				metric := infBucket.Metric
+				for _, p := range m.Label {
+					metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+				}
+				metric[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+				metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+			}
 		}
 	}

@@ -33,6 +33,14 @@ const (
 	// JobLabel is the label name indicating the job from which a timeseries
 	// was scraped.
 	JobLabel LabelName = "job"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
 )

 // A LabelName is a key for a LabelSet or Metric. It has a value associated

@@ -129,3 +129,31 @@ func BenchmarkSummaryNoLabels(b *testing.B) {
 		m.Observe(3.1415)
 	}
 }
+
+func BenchmarkHistogramWithLabelValues(b *testing.B) {
+	m := NewHistogramVec(
+		HistogramOpts{
+			Name: "benchmark_histogram",
+			Help: "A histogram to benchmark it.",
+		},
+		[]string{"one", "two", "three"},
+	)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
+	}
+}
+
+func BenchmarkHistogramNoLabels(b *testing.B) {
+	m := NewHistogram(HistogramOpts{
+		Name: "benchmark_histogram",
+		Help: "A histogram to benchmark it.",
+	},
+	)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		m.Observe(3.1415)
+	}
+}

@@ -74,7 +74,7 @@ func (c *counter) Add(v float64) {
 // CounterVec is a Collector that bundles a set of Counters that all share the
 // same Desc, but have different values for their variable labels. This is used
 // if you want to count the same thing partitioned by various dimensions
-// (e.g. number of http requests, partitioned by response code and
+// (e.g. number of HTTP requests, partitioned by response code and
 // method). Create instances with NewCounterVec.
 //
 // CounterVec embeds MetricVec. See there for a full list of methods with

@@ -129,7 +129,7 @@ func ExampleCounterVec() {
 	httpReqs := prometheus.NewCounterVec(
 		prometheus.CounterOpts{
 			Name: "http_requests_total",
-			Help: "How many HTTP requests processed, partitioned by status code and http method.",
+			Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
 			ConstLabels: prometheus.Labels{"env": *binaryVersion},
 		},
 		[]string{"code", "method"},
@@ -200,7 +200,7 @@ func ExampleRegister() {
 		fmt.Println("taskCounter registered.")
 	}
 	// Don't forget to tell the HTTP server about the Prometheus handler.
-	// (In a real program, you still need to start the http server...)
+	// (In a real program, you still need to start the HTTP server...)
 	http.Handle("/metrics", prometheus.Handler())

 	// Now you can start workers and give every one of them a pointer to
@@ -240,7 +240,7 @@ func ExampleRegister() {
 	// Prometheus will not allow you to ever export metrics with
 	// inconsistent help strings or label names. After unregistering, the
-	// unregistered metrics will cease to show up in the /metrics http
+	// unregistered metrics will cease to show up in the /metrics HTTP
 	// response, but the registry still remembers that those metrics had
 	// been exported before. For this example, we will now choose a
 	// different name. (In a real program, you would obviously not export
@@ -452,3 +452,49 @@ func ExampleSummaryVec() {
 	// >
 	// ]
 }
+
+func ExampleHistogram() {
+	temps := prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name: "pond_temperature_celsius",
+		Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+		Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide.
+	})
+
+	// Simulate some observations.
+	for i := 0; i < 1000; i++ {
+		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+	}
+
+	// Just for demonstration, let's check the state of the histogram by
+	// (ab)using its Write method (which is usually only used by Prometheus
+	// internally).
+	metric := &dto.Metric{}
+	temps.Write(metric)
+	fmt.Println(proto.MarshalTextString(metric))
+
+	// Output:
+	// histogram: <
+	//   sample_count: 1000
+	//   sample_sum: 29969.50000000001
+	//   bucket: <
+	//     cumulative_count: 192
+	//     upper_bound: 20
+	//   >
+	//   bucket: <
+	//     cumulative_count: 366
+	//     upper_bound: 25
+	//   >
+	//   bucket: <
+	//     cumulative_count: 501
+	//     upper_bound: 30
+	//   >
+	//   bucket: <
+	//     cumulative_count: 638
+	//     upper_bound: 35
+	//   >
+	//   bucket: <
+	//     cumulative_count: 816
+	//     upper_bound: 40
+	//   >
+	// >
+}

prometheus/histogram.go (new file, 336 lines)

@@ -0,0 +1,336 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sync/atomic"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/model"
dto "github.com/prometheus/client_model/go"
)
// A Histogram counts individual observations from an event or sample stream in
// configurable buckets. Similar to a summary, it also provides a sum of
// observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
// the histogram_quantile function in the query language.
//
// Note that Histograms, in contrast to Summaries, can be aggregated with the
// Prometheus query language (see the documentation for detailed
// procedures). However, Histograms require the user to pre-define suitable
// buckets, and they are in general less accurate. The Observe method of a
// Histogram has a very low performance overhead in comparison with the Observe
// method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
Metric
Collector
// Observe adds a single observation to the histogram.
Observe(float64)
}
var (
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a
// network service. Most likely, however, you will be required to define
// buckets customized to your use case.
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
"%q is not allowed as label name in histograms", model.BucketLabel,
)
)
// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
if count < 1 {
panic("LinearBuckets needs a positive count")
}
buckets := make([]float64, count)
for i := range buckets {
buckets[i] = start
start += width
}
return buckets
}
// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
// upper bound of 'start' and each following bucket's upper bound is 'factor'
// times the previous bucket's upper bound. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal to 1.
func ExponentialBuckets(start, factor float64, count int) []float64 {
if count < 1 {
panic("ExponentialBuckets needs a positive count")
}
if start <= 0 {
panic("ExponentialBuckets needs a positive start value")
}
if factor <= 1 {
panic("ExponentialBuckets needs a factor greater than 1")
}
buckets := make([]float64, count)
for i := range buckets {
buckets[i] = start
start *= factor
}
return buckets
}
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with
// "_"). Only Name is mandatory, the others merely help structuring the
// name. Note that the fully-qualified name of the Histogram must be a
// valid Prometheus metric name.
Namespace string
Subsystem string
Name string
// Help provides information about this Histogram. Mandatory!
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this
// Histogram. Histograms with the same fully-qualified name must have the
// same label names in their ConstLabels.
//
// Note that in most cases, labels have a value that varies during the
// lifetime of a process. Those labels are usually managed with a
// HistogramVec. ConstLabels serve only special purposes. One is for the
// special case where the value of a label does not change during the
// lifetime of a process, e.g. if the revision of the running binary is
// put into a label. Another, more advanced purpose is if more than one
// Collector needs to collect Histograms with the same fully-qualified
// name. In that case, those Histograms must differ in the values of
// their ConstLabels. See the Collector examples.
//
// If the value of a label never changes (not even between binaries),
// that label most likely should not be a label at all (but part of the
// metric name).
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
// element in the slice is the upper inclusive bound of a bucket. The
// values must be sorted in strictly increasing order. There is no need
// to add a highest bucket with +Inf bound, it will be added
// implicitly. The default value is DefBuckets.
Buckets []float64
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
func NewHistogram(opts HistogramOpts) Histogram {
return newHistogram(
NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
),
opts,
)
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
panic(errInconsistentCardinality)
}
for _, n := range desc.variableLabels {
if n == model.BucketLabel {
panic(errBucketLabelNotAllowed)
}
}
for _, lp := range desc.constLabelPairs {
if lp.GetName() == model.BucketLabel {
panic(errBucketLabelNotAllowed)
}
}
if len(opts.Buckets) == 0 {
opts.Buckets = DefBuckets
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
if upperBound >= h.upperBounds[i+1] {
panic(fmt.Errorf(
"histogram buckets must be in increasing order: %f >= %f",
upperBound, h.upperBounds[i+1],
))
}
} else {
if math.IsInf(upperBound, +1) {
// The +Inf bucket is implicit. Remove it here.
h.upperBounds = h.upperBounds[:i]
}
}
}
// Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds))
h.Init(h) // Init self-collection.
return h
}
type histogram struct {
SelfCollector
// Note that there is no mutex required.
desc *Desc
upperBounds []float64
counts []uint64
labelPairs []*dto.LabelPair
sumBits uint64 // The bits of the float64 representing the sum of all observations.
count uint64
}
func (h *histogram) Desc() *Desc {
return h.desc
}
func (h *histogram) Observe(v float64) {
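	// Increment the first bucket whose upper bound is >= v. The per-bucket
	// counts are kept non-cumulative here; Write cumulates them on export.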
for i, upperBound := range h.upperBounds {
if v <= upperBound {
atomic.AddUint64(&h.counts[i], 1)
break
}
}
atomic.AddUint64(&h.count, 1)
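	// Add v to the sum without a mutex: reload the current float64 bits,
	// add v, and swap the new bits in via CAS, retrying on contention.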
for {
oldBits := atomic.LoadUint64(&h.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
break
}
}
}
func (h *histogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
buckets := make([]*dto.Bucket, len(h.upperBounds))
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
var count uint64
for i, upperBound := range h.upperBounds {
count += atomic.LoadUint64(&h.counts[i])
buckets[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(upperBound),
}
}
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
return nil
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
},
},
}
}
// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns a Histogram and not a
// Metric so that no type conversion is required.
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Histogram), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns a Histogram and not a Metric so that no
// type conversion is required.
func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Histogram), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
return m.MetricVec.WithLabelValues(lvs...).(Histogram)
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *HistogramVec) With(labels Labels) Histogram {
return m.MetricVec.With(labels).(Histogram)
}

@@ -0,0 +1,318 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"math/rand"
"reflect"
"sort"
"sync"
"testing"
"testing/quick"
dto "github.com/prometheus/client_model/go"
)
func benchmarkHistogramObserve(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewHistogram(HistogramOpts{})
for i := 0; i < w; i++ {
go func() {
g.Wait()
for i := 0; i < b.N; i++ {
s.Observe(float64(i))
}
wg.Done()
}()
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkHistogramObserve1(b *testing.B) {
benchmarkHistogramObserve(1, b)
}
func BenchmarkHistogramObserve2(b *testing.B) {
benchmarkHistogramObserve(2, b)
}
func BenchmarkHistogramObserve4(b *testing.B) {
benchmarkHistogramObserve(4, b)
}
func BenchmarkHistogramObserve8(b *testing.B) {
benchmarkHistogramObserve(8, b)
}
func benchmarkHistogramWrite(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewHistogram(HistogramOpts{})
for i := 0; i < 1000000; i++ {
s.Observe(float64(i))
}
for j := 0; j < w; j++ {
outs := make([]dto.Metric, b.N)
go func(o []dto.Metric) {
g.Wait()
for i := 0; i < b.N; i++ {
s.Write(&o[i])
}
wg.Done()
}(outs)
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkHistogramWrite1(b *testing.B) {
benchmarkHistogramWrite(1, b)
}
func BenchmarkHistogramWrite2(b *testing.B) {
benchmarkHistogramWrite(2, b)
}
func BenchmarkHistogramWrite4(b *testing.B) {
benchmarkHistogramWrite(4, b)
}
func BenchmarkHistogramWrite8(b *testing.B) {
benchmarkHistogramWrite(8, b)
}
// Intentionally adding +Inf here to test if that case is handled correctly.
// Also, getCumulativeCounts depends on it.
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
func TestHistogramConcurrency(t *testing.T) {
rand.Seed(42)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%5 + 1)
total := mutations * concLevel
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sum := NewHistogram(HistogramOpts{
Name: "test_histogram",
Help: "helpless",
Buckets: testBuckets,
})
allVars := make([]float64, total)
var sampleSum float64
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
allVars[i*mutations+j] = v
sampleSum += v
}
go func(vals []float64) {
start.Wait()
for _, v := range vals {
sum.Observe(v)
}
end.Done()
}(vals)
}
sort.Float64s(allVars)
start.Done()
end.Wait()
m := &dto.Metric{}
sum.Write(m)
if got, want := int(*m.Histogram.SampleCount), total; got != want {
t.Errorf("got sample count %d, want %d", got, want)
}
if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f, want %f", got, want)
}
wantCounts := getCumulativeCounts(allVars)
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
t.Errorf("got %d buckets in protobuf, want %d", got, want)
}
for i, wantBound := range testBuckets {
if i == len(testBuckets)-1 {
break // No +Inf bucket in protobuf.
}
if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {
t.Errorf("got bound %f, want %f", gotBound, wantBound)
}
if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {
t.Errorf("got count %d, want %d", gotCount, wantCount)
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func TestHistogramVecConcurrency(t *testing.T) {
rand.Seed(42)
objectives := make([]float64, 0, len(DefObjectives))
for qu := range DefObjectives {
objectives = append(objectives, qu)
}
sort.Float64s(objectives)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%7 + 1)
vecLength := int(n%3 + 1)
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
his := NewHistogramVec(
HistogramOpts{
Name: "test_histogram",
Help: "helpless",
Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},
},
[]string{"label"},
)
allVars := make([][]float64, vecLength)
sampleSums := make([]float64, vecLength)
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
picks := make([]int, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
pick := rand.Intn(vecLength)
picks[j] = pick
allVars[pick] = append(allVars[pick], v)
sampleSums[pick] += v
}
go func(vals []float64) {
start.Wait()
for i, v := range vals {
his.WithLabelValues(string('A' + picks[i])).Observe(v)
}
end.Done()
}(vals)
}
for _, vars := range allVars {
sort.Float64s(vars)
}
start.Done()
end.Wait()
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := his.WithLabelValues(string('A' + i))
s.Write(m)
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
t.Errorf("got %d buckets in protobuf, want %d", got, want)
}
if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {
t.Errorf("got sample count %d, want %d", got, want)
}
if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f, want %f", got, want)
}
wantCounts := getCumulativeCounts(allVars[i])
for j, wantBound := range testBuckets {
if j == len(testBuckets)-1 {
break // No +Inf bucket in protobuf.
}
if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {
t.Errorf("got bound %f, want %f", gotBound, wantBound)
}
if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {
t.Errorf("got count %d, want %d", gotCount, wantCount)
}
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func getCumulativeCounts(vars []float64) []uint64 {
counts := make([]uint64, len(testBuckets))
for _, v := range vars {
for i := len(testBuckets) - 1; i >= 0; i-- {
if v > testBuckets[i] {
break
}
counts[i]++
}
}
return counts
}
func TestBuckets(t *testing.T) {
got := LinearBuckets(-15, 5, 6)
want := []float64{-15, -10, -5, 0, 5, 10}
if !reflect.DeepEqual(got, want) {
t.Errorf("linear buckets: got %v, want %v", got, want)
}
got = ExponentialBuckets(100, 1.2, 3)
want = []float64{100, 120, 144}
if !reflect.DeepEqual(got, want) {
t.Errorf("exponential buckets: got %v, want %v", got, want)
}
}

@@ -47,7 +47,7 @@ func nowSeries(t ...time.Time) nower {
 }

 // InstrumentHandler wraps the given HTTP handler for instrumentation. It
-// registers four metric collectors (if not already done) and reports http
+// registers four metric collectors (if not already done) and reports HTTP
 // metrics to the (newly or already) registered collectors: http_requests_total
 // (CounterVec), http_request_duration_microseconds (Summary),
 // http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each

@@ -171,7 +171,7 @@ func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
 }

 // PanicOnCollectError sets the behavior whether a panic is caused upon an error
-// while metrics are collected and served to the http endpoint. By default, an
+// while metrics are collected and served to the HTTP endpoint. By default, an
 // internal server error (status code 500) is served with an error message.
 func PanicOnCollectError(b bool) {
 	defRegistry.panicOnCollectError = b

@@ -25,6 +25,7 @@ import (
 	dto "github.com/prometheus/client_model/go"

 	"github.com/prometheus/client_golang/_vendor/perks/quantile"
+	"github.com/prometheus/client_golang/model"
 )

 // A Summary captures individual observations from an event or sample stream and
@@ -35,6 +36,12 @@ import (
 // Summary provides the median, the 90th and the 99th percentile of the latency
 // as rank estimations.
 //
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
 // To create Summary instances, use NewSummary.
 type Summary interface {
 	Metric
@@ -44,9 +51,13 @@ type Summary interface {
 	Observe(float64)
 }

-// DefObjectives are the default Summary quantile values.
 var (
+	// DefObjectives are the default Summary quantile values.
 	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+	errQuantileLabelNotAllowed = fmt.Errorf(
+		"%q is not allowed as label name in summaries", model.QuantileLabel,
+	)
 )

 // Default values for SummaryOpts.
@@ -110,7 +121,10 @@ type SummaryOpts struct {
 	// AgeBuckets is the number of buckets used to exclude observations that
 	// are older than MaxAge from the summary. A higher number has a
 	// resource penalty, so only increase it if the higher resolution is
-	// really required. The default value is DefAgeBuckets.
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
 	AgeBuckets uint32

 	// BufCap defines the default sample stream buffer size. The default
@@ -119,10 +133,6 @@ type SummaryOpts struct {
 	// is the internal buffer size of the underlying package
 	// "github.com/bmizerany/perks/quantile").
 	BufCap uint32
-
-	// Epsilon is the error epsilon for the quantile rank estimate. Must be
-	// positive. The default is DefEpsilon.
-	Epsilon float64
 }

 // TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
@@ -158,6 +168,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
 		panic(errInconsistentCardinality)
 	}

+	for _, n := range desc.variableLabels {
+		if n == model.QuantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == model.QuantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
 	if len(opts.Objectives) == 0 {
 		opts.Objectives = DefObjectives
 	}
@@ -358,7 +379,7 @@ func (s quantSort) Less(i, j int) bool {
 // SummaryVec is a Collector that bundles a set of Summaries that all share the
 // same Desc, but have different values for their variable labels. This is used
 // if you want to count the same thing partitioned by various dimensions
-// (e.g. http request latencies, partitioned by status code and method). Create
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
 // instances with NewSummaryVec.
 type SummaryVec struct {
 	MetricVec
@@ -411,14 +432,14 @@ func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
 // WithLabelValues works as GetMetricWithLabelValues, but panics where
 // GetMetricWithLabelValues would have returned an error. By not returning an
 // error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
 func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
 	return m.MetricVec.WithLabelValues(lvs...).(Summary)
 }

 // With works as GetMetricWith, but panics where GetMetricWithLabels would have
 // returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
 func (m *SummaryVec) With(labels Labels) Summary {
 	return m.MetricVec.With(labels).(Summary)
 }

@@ -24,8 +24,10 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"math"
 	"strings"

+	"github.com/prometheus/client_golang/model"
 	dto "github.com/prometheus/client_model/go"
 )
@@ -116,7 +118,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
 		for _, q := range metric.Summary.Quantile {
 			n, err = writeSample(
 				name, metric,
-				"quantile", fmt.Sprint(q.GetQuantile()),
+				model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
 				q.GetValue(),
 				out,
 			)
@@ -145,10 +147,11 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
 				"expected summary in metric %s", metric,
 			)
 		}
+		infSeen := false
 		for _, q := range metric.Histogram.Bucket {
 			n, err = writeSample(
 				name+"_bucket", metric,
-				"le", fmt.Sprint(q.GetUpperBound()),
+				model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
 				float64(q.GetCumulativeCount()),
 				out,
 			)
@@ -156,7 +159,21 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
 			if err != nil {
 				return written, err
 			}
-			// TODO: Add +inf bucket if it's missing.
+			if math.IsInf(q.GetUpperBound(), +1) {
+				infSeen = true
+			}
 		}
+		if !infSeen {
+			n, err = writeSample(
+				name+"_bucket", metric,
+				model.BucketLabel, "+Inf",
+				float64(metric.Histogram.GetSampleCount()),
+				out,
+			)
+			if err != nil {
+				return written, err
+			}
+			written += n
+		}
 		n, err = writeSample(
 			name+"_sum", metric, "", "",

@@ -267,6 +267,50 @@ request_duration_microseconds_bucket{le="172.8"} 1524
 request_duration_microseconds_bucket{le="+Inf"} 2693
 request_duration_microseconds_sum 1.7560473e+06
 request_duration_microseconds_count 2693
+`,
+	},
+	// 5: Histogram with missing +Inf bucket.
+	{
+		in: &dto.MetricFamily{
+			Name: proto.String("request_duration_microseconds"),
+			Help: proto.String("The response latency."),
+			Type: dto.MetricType_HISTOGRAM.Enum(),
+			Metric: []*dto.Metric{
+				&dto.Metric{
+					Histogram: &dto.Histogram{
+						SampleCount: proto.Uint64(2693),
+						SampleSum: proto.Float64(1756047.3),
+						Bucket: []*dto.Bucket{
+							&dto.Bucket{
+								UpperBound: proto.Float64(100),
+								CumulativeCount: proto.Uint64(123),
+							},
+							&dto.Bucket{
+								UpperBound: proto.Float64(120),
+								CumulativeCount: proto.Uint64(412),
+							},
+							&dto.Bucket{
+								UpperBound: proto.Float64(144),
+								CumulativeCount: proto.Uint64(592),
+							},
+							&dto.Bucket{
+								UpperBound: proto.Float64(172.8),
+								CumulativeCount: proto.Uint64(1524),
+							},
+						},
+					},
+				},
+			},
+		},
+		out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
 `,
 	},
 }

@@ -274,8 +274,8 @@ func (p *Parser) startLabelName() stateFn {
 	}
 	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
 	// labels to 'real' labels.
-	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == "quantile") &&
-		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == "le") {
+	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
 		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
 	}
 	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
@@ -306,7 +306,7 @@ func (p *Parser) startLabelValue() stateFn {
 	// - Quantile labels are special, will result in dto.Quantile later.
 	// - Other labels have to be added to currentLabels for signature calculation.
 	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
-		if p.currentLabelPair.GetName() == "quantile" {
+		if p.currentLabelPair.GetName() == model.QuantileLabel {
 			if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
 				// Create a more helpful error message.
 				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
@@ -318,7 +318,7 @@ func (p *Parser) startLabelValue() stateFn {
 	}
 	// Similar special treatment of histograms.
 	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
-		if p.currentLabelPair.GetName() == "le" {
+		if p.currentLabelPair.GetName() == model.BucketLabel {
 			if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
 				// Create a more helpful error message.
 				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))