// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"fmt"
	"math"
	"sort"
	"sync"
	"time"

	"github.com/beorn7/perks/quantile"
	"github.com/golang/protobuf/proto"

	dto "github.com/prometheus/client_model/go"
)

// quantileLabel is used for the label that defines the quantile in a
// summary.
const quantileLabel = "quantile"

// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary.
type Summary interface {
	Metric
	Collector

	// Observe adds a single observation to the summary.
	Observe(float64)
}
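
// sketchSummaryUsage illustrates the request-latency use-case described
// above. It is an illustrative sketch only: the metric name, help string, and
// the use of MustRegister on the default registry are example choices, not
// requirements.
func sketchSummaryUsage() {
	requestDuration := NewSummary(SummaryOpts{
		Name:       "http_request_duration_seconds",
		Help:       "HTTP request latencies in seconds.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	MustRegister(requestDuration)

	start := time.Now()
	// ... handle the request ...
	requestDuration.Observe(time.Since(start).Seconds())
}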

// DefObjectives are the default Summary quantile values.
//
// Deprecated: DefObjectives will not be used as the default objectives in
// v0.10 of the library. The default Summary will have no quantiles then.
var (
	DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

	errQuantileLabelNotAllowed = fmt.Errorf(
		"%q is not allowed as label name in summaries", quantileLabel,
	)
)
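
// Per the deprecation note above, callers are encouraged to set Objectives
// explicitly. The sketch below shows a Summary that only tracks count and sum
// (no quantiles) by passing an empty map; the metric name is illustrative.
func sketchSummaryWithoutQuantiles() Summary {
	return NewSummary(SummaryOpts{
		Name:       "http_request_size_bytes",
		Help:       "Sizes of received request payloads in bytes.",
		Objectives: map[float64]float64{}, // explicitly no quantiles
	})
}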

// Default values for SummaryOpts.
const (
	// DefMaxAge is the default duration for which observations stay
	// relevant.
	DefMaxAge time.Duration = 10 * time.Minute
	// DefAgeBuckets is the default number of buckets used to calculate the
	// age of observations.
	DefAgeBuckets = 5
	// DefBufCap is the standard buffer size for collecting Summary observations.
	DefBufCap = 500
)

// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Summary must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Summary. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this
	// Summary. Summaries with the same fully-qualified name must have the
	// same label names in their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a
	// SummaryVec. ConstLabels serve only special purposes. One is for the
	// special case where the value of a label does not change during the
	// lifetime of a process, e.g. if the revision of the running binary is
	// put into a label. Another, more advanced purpose is if more than one
	// Collector needs to collect Summaries with the same fully-qualified
	// name. In that case, those Summaries must differ in the values of
	// their ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported for q
	// will be the φ-quantile value for some φ between q-e and q+e. The
	// default value is DefObjectives. It is used if Objectives is left at
	// its zero value (i.e. nil). To create a Summary without Objectives,
	// set it to an empty map (i.e. map[float64]float64{}).
	//
	// Deprecated: Note that the current value of DefObjectives is
	// deprecated. It will be replaced by an empty map in v0.10 of the
	// library. Please explicitly set Objectives to the desired value.
	Objectives map[float64]float64

	// MaxAge defines the duration for which an observation stays relevant
	// for the summary. Must be positive. The default value is DefMaxAge.
	MaxAge time.Duration

	// AgeBuckets is the number of buckets used to exclude observations that
	// are older than MaxAge from the summary. A higher number has a
	// resource penalty, so only increase it if the higher resolution is
	// really required. For very high observation rates, you might want to
	// reduce the number of age buckets. With only one age bucket, you will
	// effectively see a complete reset of the summary each time MaxAge has
	// passed. The default value is DefAgeBuckets.
	AgeBuckets uint32

	// BufCap defines the default sample stream buffer size. The default
	// value of DefBufCap should suffice for most uses. If there is a need
	// to increase the value, a multiple of 500 is recommended (because that
	// is the internal buffer size of the underlying package
	// "github.com/beorn7/perks/quantile").
	BufCap uint32
}
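
// The sketch below ties the fields above together: with an objective of 0.95
// and an allowed absolute error of 0.005, the reported value is some
// φ-quantile for φ between 0.945 and 0.955, computed over a sliding window of
// MaxAge split into AgeBuckets buckets. All concrete values are illustrative.
var sketchSummaryOpts = SummaryOpts{
	Namespace:  "myapp",
	Subsystem:  "rpc",
	Name:       "duration_seconds", // fully-qualified name: myapp_rpc_duration_seconds
	Help:       "RPC latency distribution.",
	Objectives: map[float64]float64{0.5: 0.05, 0.95: 0.005, 0.99: 0.001},
	MaxAge:     5 * time.Minute, // observations older than this no longer influence the quantiles
	AgeBuckets: 5,               // resolution of the sliding window
	BufCap:     500,             // observation buffer size (a multiple of 500 is recommended)
}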

// Problem with the sliding-window decay algorithm: The Merge method of
// perks/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
// on observation time, less effort on scrape time, which is exactly the
// opposite of what we try to accomplish, but at least the results are correct.
//
// The quite elegant previous contraption to merge the age buckets efficiently
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
// can't be used anymore.

// NewSummary creates a new Summary based on the provided SummaryOpts.
func NewSummary(opts SummaryOpts) Summary {
	return newSummary(
		NewDesc(
			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
			opts.Help,
			nil,
			opts.ConstLabels,
		),
		opts,
	)
}

func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}

	for _, n := range desc.variableLabels {
		if n == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}

	if opts.Objectives == nil {
		opts.Objectives = DefObjectives
	}

	if opts.MaxAge < 0 {
		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
	}
	if opts.MaxAge == 0 {
		opts.MaxAge = DefMaxAge
	}

	if opts.AgeBuckets == 0 {
		opts.AgeBuckets = DefAgeBuckets
	}

	if opts.BufCap == 0 {
		opts.BufCap = DefBufCap
	}

	s := &summary{
		desc: desc,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

		labelPairs: makeLabelPairs(desc, labelValues),

		hotBuf:         make([]float64, 0, opts.BufCap),
		coldBuf:        make([]float64, 0, opts.BufCap),
		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
	}
	s.headStreamExpTime = time.Now().Add(s.streamDuration)
	s.hotBufExpTime = s.headStreamExpTime

	for i := uint32(0); i < opts.AgeBuckets; i++ {
		s.streams = append(s.streams, s.newStream())
	}
	s.headStream = s.streams[0]

	for qu := range s.objectives {
		s.sortedObjectives = append(s.sortedObjectives, qu)
	}
	sort.Float64s(s.sortedObjectives)

	s.init(s) // Init self-collection.
	return s
}

type summary struct {
	selfCollector

	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
	mtx    sync.Mutex // Protects every other moving part.
	// Lock bufMtx before mtx if both are needed.

	desc *Desc

	objectives       map[float64]float64
	sortedObjectives []float64

	labelPairs []*dto.LabelPair

	sum float64
	cnt uint64

	hotBuf, coldBuf []float64

	streams                          []*quantile.Stream
	streamDuration                   time.Duration
	headStream                       *quantile.Stream
	headStreamIdx                    int
	headStreamExpTime, hotBufExpTime time.Time
}

func (s *summary) Desc() *Desc {
	return s.desc
}

func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
	s.hotBuf = append(s.hotBuf, v)
	if len(s.hotBuf) == cap(s.hotBuf) {
		s.asyncFlush(now)
	}
}

func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs

	return nil
}

func (s *summary) newStream() *quantile.Stream {
	return quantile.NewTargeted(s.objectives)
}

// asyncFlush needs bufMtx locked.
func (s *summary) asyncFlush(now time.Time) {
	s.mtx.Lock()
	s.swapBufs(now)

	// Unblock the original goroutine that was responsible for the mutation
	// that triggered the compaction. But hold onto the global non-buffer
	// state mutex until the operation finishes.
	go func() {
		s.flushColdBuf()
		s.mtx.Unlock()
	}()
}

// maybeRotateStreams needs mtx AND bufMtx locked.
func (s *summary) maybeRotateStreams() {
	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
		s.headStream.Reset()
		s.headStreamIdx++
		if s.headStreamIdx >= len(s.streams) {
			s.headStreamIdx = 0
		}
		s.headStream = s.streams[s.headStreamIdx]
		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
	}
}

// flushColdBuf needs mtx locked.
func (s *summary) flushColdBuf() {
	for _, v := range s.coldBuf {
		for _, stream := range s.streams {
			stream.Insert(v)
		}
		s.cnt++
		s.sum += v
	}
	s.coldBuf = s.coldBuf[0:0]
	s.maybeRotateStreams()
}

// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
func (s *summary) swapBufs(now time.Time) {
	if len(s.coldBuf) != 0 {
		panic("coldBuf is not empty")
	}
	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
	// hotBuf is now empty and gets new expiration set.
	for now.After(s.hotBufExpTime) {
		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
	}
}

type quantSort []*dto.Quantile

func (s quantSort) Len() int {
	return len(s)
}

func (s quantSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s quantSort) Less(i, j int) bool {
	return s[i].GetQuantile() < s[j].GetQuantile()
}

// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to observe the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
	*metricVec
}

// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		labelNames,
		opts.ConstLabels,
	)
	return &SummaryVec{
		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
			return newSummary(desc, opts, lvs...)
		}),
	}
}
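
// sketchSummaryVecUsage illustrates partitioning by status code and HTTP
// method, as described above. The metric and label names are illustrative.
func sketchSummaryVecUsage() {
	requestDuration := NewSummaryVec(
		SummaryOpts{
			Name:       "http_request_duration_seconds",
			Help:       "HTTP request latencies in seconds.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"code", "method"},
	)
	MustRegister(requestDuration)

	// One child Summary is created per observed label combination.
	requestDuration.WithLabelValues("200", "GET").Observe(0.021)
	requestDuration.With(Labels{"code": "404", "method": "POST"}).Observe(0.003)
}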

// GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
// create the new Summary but leave it at its starting value, a Summary without
// any observations.
//
// Keeping the Summary for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Summary from the SummaryVec. In that case, the
// Summary will still exist, but it will not be exported anymore, even if a
// Summary with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
	metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Observer), err
	}
	return nil, err
}
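
// As mentioned above, keeping the returned Observer around avoids the label
// lookup on every observation. The sketch below caches the child for a fixed
// label combination once and handles the possible cardinality error; all
// concrete values are illustrative.
func sketchCachedObserver(vec *SummaryVec) (Observer, error) {
	obs, err := vec.GetMetricWithLabelValues("200", "GET")
	if err != nil {
		// Wrong number of label values for this vec.
		return nil, err
	}
	// obs can now be used repeatedly on the hot path without further lookups.
	obs.Observe(0.017)
	return obs, nil
}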

// GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
	metric, err := m.metricVec.getMetricWith(labels)
	if metric != nil {
		return metric.(Observer), err
	}
	return nil, err
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
	return m.metricVec.withLabelValues(lvs...).(Observer)
}

// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *SummaryVec) With(labels Labels) Observer {
	return m.metricVec.with(labels).(Observer)
}

type constSummary struct {
	desc       *Desc
	count      uint64
	sum        float64
	quantiles  map[float64]float64
	labelPairs []*dto.LabelPair
}

func (s *constSummary) Desc() *Desc {
	return s.desc
}

func (s *constSummary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.quantiles))

	sum.SampleCount = proto.Uint64(s.count)
	sum.SampleSum = proto.Float64(s.sum)

	for rank, q := range s.quantiles {
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs

	return nil
}

// NewConstSummary returns a metric representing a Prometheus summary with fixed
// values for the count, sum, and quantiles. As those parameters cannot be
// changed, the returned value does not implement the Summary interface (but
// only the Metric interface). Users of this package will not have much use for
// it in regular operations. However, when implementing custom Collectors, it is
// useful as a throw-away metric that is generated on the fly to send it to
// Prometheus in the Collect method.
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) (Metric, error) {
	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
		return nil, err
	}
	return &constSummary{
		desc:       desc,
		count:      count,
		sum:        sum,
		quantiles:  quantiles,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
func MustNewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) Metric {
	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
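
// The sketch below shows the custom-Collector use-case described in the
// NewConstSummary comment: a Collect method that derives count, sum, and
// quantiles from some external system and emits them as a throw-away metric.
// The collector type, its desc field, and the data values are illustrative.
type sketchQueueCollector struct {
	desc *Desc // e.g. NewDesc("queue_wait_seconds", "Time spent waiting in the queue.", nil, nil)
}

func (c *sketchQueueCollector) Describe(ch chan<- *Desc) {
	ch <- c.desc
}

func (c *sketchQueueCollector) Collect(ch chan<- Metric) {
	// Count, sum, and quantiles would normally come from the instrumented system.
	ch <- MustNewConstSummary(
		c.desc,
		4711,   // observation count
		403.42, // sum of observations
		map[float64]float64{0.5: 0.23, 0.99: 0.56}, // rank -> quantile value
	)
}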