diff --git a/NOTICE b/NOTICE index 37e4a7d..dd878a3 100644 --- a/NOTICE +++ b/NOTICE @@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/). The following components are included in this product: -goautoneg -http://bitbucket.org/ww/goautoneg -Copyright 2011, Open Knowledge Foundation Ltd. -See README.txt for license details. - perks - a fork of https://github.com/bmizerany/perks https://github.com/beorn7/perks Copyright 2013-2015 Blake Mizerany, Björn Rabenstein diff --git a/prometheus/README.md b/prometheus/README.md index 81032be..44986bf 100644 --- a/prometheus/README.md +++ b/prometheus/README.md @@ -1,53 +1 @@ -# Overview -This is the [Prometheus](http://www.prometheus.io) telemetric -instrumentation client [Go](http://golang.org) client library. It -enable authors to define process-space metrics for their servers and -expose them through a web service interface for extraction, -aggregation, and a whole slew of other post processing techniques. - -# Installing - $ go get github.com/prometheus/client_golang/prometheus - -# Example -```go -package main - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - indexed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "my_company", - Subsystem: "indexer", - Name: "documents_indexed", - Help: "The number of documents indexed.", - }) - size = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "my_company", - Subsystem: "storage", - Name: "documents_total_size_bytes", - Help: "The total size of all documents in the storage.", - }) -) - -func main() { - http.Handle("/metrics", prometheus.Handler()) - - indexed.Inc() - size.Set(5) - - http.ListenAndServe(":8080", nil) -} - -func init() { - prometheus.MustRegister(indexed) - prometheus.MustRegister(size) -} -``` - -# Documentation - -[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/prometheus/collector.go b/prometheus/collector.go index c046880..623d3d8 100644 --- a/prometheus/collector.go +++ b/prometheus/collector.go @@ -15,15 +15,15 @@ package prometheus // Collector is the interface implemented by anything that can be used by // Prometheus to collect metrics. A Collector has to be registered for -// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. +// collection. See Registerer.Register. // -// The stock metrics provided by this package (like Gauge, Counter, Summary) are -// also Collectors (which only ever collect one metric, namely itself). An -// implementer of Collector may, however, collect multiple metrics in a -// coordinated fashion and/or create metrics on the fly. Examples for collectors -// already implemented in this library are the metric vectors (i.e. collection -// of multiple instances of the same Metric but with different label values) -// like GaugeVec or SummaryVec, and the ExpvarCollector. +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. 
collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. type Collector interface { // Describe sends the super-set of all possible descriptors of metrics // collected by this Collector to the provided channel and returns once @@ -37,39 +37,39 @@ type Collector interface { // executing this method, it must send an invalid descriptor (created // with NewInvalidDesc) to signal the error to the registry. Describe(chan<- *Desc) - // Collect is called by Prometheus when collecting metrics. The - // implementation sends each collected metric via the provided channel - // and returns once the last metric has been sent. The descriptor of - // each sent metric is one of those returned by Describe. Returned - // metrics that share the same descriptor must differ in their variable - // label values. This method may be called concurrently and must - // therefore be implemented in a concurrency safe way. Blocking occurs - // at the expense of total performance of rendering all registered - // metrics. Ideally, Collector implementations support concurrent - // readers. + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. Collect(chan<- Metric) } -// SelfCollector implements Collector for a single Metric so that that the -// Metric collects itself. Add it as an anonymous field to a struct that -// implements Metric, and call Init with the Metric itself as an argument. -type SelfCollector struct { +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { self Metric } -// Init provides the SelfCollector with a reference to the metric it is supposed +// init provides the selfCollector with a reference to the metric it is supposed // to collect. It is usually called within the factory function to create a // metric. See example. -func (c *SelfCollector) Init(self Metric) { +func (c *selfCollector) init(self Metric) { c.self = self } // Describe implements Collector. -func (c *SelfCollector) Describe(ch chan<- *Desc) { +func (c *selfCollector) Describe(ch chan<- *Desc) { ch <- c.self.Desc() } // Collect implements Collector. -func (c *SelfCollector) Collect(ch chan<- Metric) { +func (c *selfCollector) Collect(ch chan<- Metric) { ch <- c.self } diff --git a/prometheus/counter.go b/prometheus/counter.go index d2a564b..0537f88 100644 --- a/prometheus/counter.go +++ b/prometheus/counter.go @@ -35,6 +35,9 @@ type Counter interface { // Prometheus metric. Do not use it for regular handling of a // Prometheus counter (as it can be used to break the contract of // monotonically increasing values). + // + // Deprecated: Use NewConstMetric to create a counter for an external + // value. A Counter should never be set. 
Set(float64) // Inc increments the counter by 1. Inc() @@ -55,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter { opts.ConstLabels, ) result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} - result.Init(result) // Init self-collection. + result.init(result) // Init self-collection. return result } @@ -102,7 +105,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { valType: CounterValue, labelPairs: makeLabelPairs(desc, lvs), }} - result.Init(result) // Init self-collection. + result.init(result) // Init self-collection. return result }, }, diff --git a/prometheus/desc.go b/prometheus/desc.go index ee02d9b..77f4b30 100644 --- a/prometheus/desc.go +++ b/prometheus/desc.go @@ -1,3 +1,16 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package prometheus import ( diff --git a/prometheus/doc.go b/prometheus/doc.go index ca56f5e..c867860 100644 --- a/prometheus/doc.go +++ b/prometheus/doc.go @@ -11,18 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package prometheus provides embeddable metric primitives for servers and -// standardized exposition of telemetry through a web services interface. +// Package prometheus provides metrics primitives to instrument code for +// monitoring. It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). // // All exported functions and methods are safe to be used concurrently unless -// specified otherwise. +//specified otherwise. // -// To expose metrics registered with the Prometheus registry, an HTTP server -// needs to know about the Prometheus handler. The usual endpoint is "/metrics". 
+// A Basic Example // -// http.Handle("/metrics", prometheus.Handler()) -// -// As a starting point a very basic usage example: +// As a starting point, a very basic usage example: // // package main // @@ -30,6 +29,7 @@ // "net/http" // // "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // var ( @@ -37,75 +37,145 @@ // Name: "cpu_temperature_celsius", // Help: "Current temperature of the CPU.", // }) -// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) // ) // // func init() { +// // Metrics have to be registered to be exposed: // prometheus.MustRegister(cpuTemp) // prometheus.MustRegister(hdFailures) // } // // func main() { // cpuTemp.Set(65.3) -// hdFailures.Inc() +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // -// http.Handle("/metrics", prometheus.Handler()) +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) // http.ListenAndServe(":8080", nil) // } // // -// This is a complete program that exports two metrics, a Gauge and a Counter. -// It also exports some stats about the HTTP usage of the /metrics -// endpoint. (See the Handler function for more detail.) +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the later with a label attached to turn it into a (one-dimensional) vector. // -// Two more advanced metric types are the Summary and Histogram. A more -// thorough description of metric types can be found in the prometheus docs: +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. Hovever, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. +// HistogramVec, and UntypedVec. // -// Those are all the parts needed for basic usage. Detailed documentation and -// examples are provided below. +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. 
A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. // -// Everything else this package offers is essentially for "power users" only. A -// few pointers to "power user features": +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, +// HistogramOpts, or UntypedOpts. // -// All the various ...Opts structs have a ConstLabels field for labels that -// never change their value (which is only useful under special circumstances, -// see documentation of the Opts type). +// Custom Collectors and constant Metrics // -// The Untyped metric behaves like a Gauge, but signals the Prometheus server -// not to assume anything about its type. +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). // -// Functions to fine-tune how the metric registry works: EnableCollectChecks, -// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. // -// For custom metric collection, there are two entry points: Custom Metric -// implementations and custom Collector implementations. A Metric is the -// fundamental unit in the Prometheus data model: a sample at a point in time -// together with its meta-data (like its fully-qualified name and any number of -// pairs of label name and label value) that knows how to marshal itself into a -// data transfer object (aka DTO, implemented as a protocol buffer). A Collector -// gets registered with the Prometheus registry and manages the collection of -// one or more Metrics. Many parts of this package are building blocks for -// Metrics and Collectors. Desc is the metric descriptor, actually used by all -// metrics under the hood, and by Collectors to describe the Metrics to be -// collected, but only to be dealt with by users if they implement their own -// Metrics or Collectors. To create a Desc, the BuildFQName function will come -// in handy. 
Other useful components for Metric and Collector implementation -// include: LabelPairSorter to sort the DTO version of label pairs, -// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at -// collection time, MetricVec to bundle custom Metrics into a metric vector -// Collector, SelfCollector to make a custom Metric collect itself. +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. // -// A good example for a custom Collector is the ExpVarCollector included in this -// package, which exports variables exported via the "expvar" package as -// Prometheus metrics. +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might +// cause. As suggested by the name, MustRegister panics if an error occurs. With +// the Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data +// model. Inconsistencies are ideally detected at registration time, not at +// collect time. The former will usually be detected at start-up time of a +// program, while the latter will only happen at scrape time, possibly not even +// on the first scrape if the inconsistency only becomes relevant later. That is +// the main reason why a Collector and a Metric have to describe themselves to +// the registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in +// the same way on a custom registry as the global functions Register and +// Unregister on the default registry. +// +// There are a number of uses for custom registries: You can use registries +// with special properties, see NewPedanticRegistry. You can avoid global state, +// as it is imposed by the DefaultRegistry. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegistry comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp +// sub-package. (The top-level functions in the prometheus package are +// deprecated.) 
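A minimal sketch of the custom-registry exposition described above, using only identifiers that appear in this change (NewRegistry, Registry.MustRegister, promhttp.HandlerFor, and the zero-value HandlerOpts, which keeps the default error handling and compression behavior):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry, free of the Go and process collectors that the
	// default registry ships with.
	reg := prometheus.NewRegistry()

	temp := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "cpu_temperature_celsius",
		Help: "Current temperature of the CPU.",
	})
	reg.MustRegister(temp)
	temp.Set(65.3)

	// HandlerFor gathers from reg only; HandlerOpts{} is the zero value
	// and therefore uses the default handler behavior.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}
```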
+// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added. Sending metrics to +// Graphite would be an example that will soon be implemented. package prometheus diff --git a/prometheus/example_clustermanager_test.go b/prometheus/example_clustermanager_test.go index 6f3e215..260c1b5 100644 --- a/prometheus/example_clustermanager_test.go +++ b/prometheus/example_clustermanager_test.go @@ -13,11 +13,7 @@ package prometheus_test -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" -) +import "github.com/prometheus/client_golang/prometheus" // ClusterManager is an example for a system that might have been built without // Prometheus in mind. It models a central manager of jobs running in a @@ -29,10 +25,9 @@ import ( // make use of ConstLabels to be able to register each ClusterManager instance // with Prometheus. type ClusterManager struct { - Zone string - OOMCount *prometheus.CounterVec - RAMUsage *prometheus.GaugeVec - mtx sync.Mutex // Protects OOMCount and RAMUsage. + Zone string + OOMCountDesc *prometheus.Desc + RAMUsageDesc *prometheus.Desc // ... many more fields } @@ -55,76 +50,69 @@ func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( return } -// Describe faces the interesting challenge that the two metric vectors that are -// used in this example are already Collectors themselves. However, thanks to -// the use of channels, it is really easy to "chain" Collectors. Here we simply -// call the Describe methods of the two metric vectors. +// Describe simply sends the two Descs in the struct to the channel. func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { - c.OOMCount.Describe(ch) - c.RAMUsage.Describe(ch) + ch <- c.OOMCountDesc + ch <- c.RAMUsageDesc } // Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it -// sets the retrieved values in the two metric vectors and then sends all their -// metrics to the channel (again using a chaining technique as in the Describe -// method). Since Collect could be called multiple times concurrently, that part -// is protected by a mutex. +// creates constant metrics for each host on the fly based on the returned data. +// +// Note that Collect could be called concurrently, so we depend on +// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() - c.mtx.Lock() - defer c.mtx.Unlock() for host, oomCount := range oomCountByHost { - c.OOMCount.WithLabelValues(host).Set(float64(oomCount)) + ch <- prometheus.MustNewConstMetric( + c.OOMCountDesc, + prometheus.CounterValue, + float64(oomCount), + host, + ) } for host, ramUsage := range ramUsageByHost { - c.RAMUsage.WithLabelValues(host).Set(ramUsage) + ch <- prometheus.MustNewConstMetric( + c.RAMUsageDesc, + prometheus.GaugeValue, + ramUsage, + host, + ) } - c.OOMCount.Collect(ch) - c.RAMUsage.Collect(ch) - // All metrics in OOMCount and RAMUsage are sent to the channel now. We - // can safely reset the two metric vectors now, so that we can start - // fresh in the next Collect cycle. (Imagine a host disappears from the - // cluster. If we did not reset here, its Metric would stay in the - // metric vectors forever.) 
- c.OOMCount.Reset() - c.RAMUsage.Reset() } -// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note +// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note // that the zone is set as a ConstLabel. (It's different in each instance of the -// ClusterManager, but constant over the lifetime of an instance.) The reported -// values are partitioned by host, which is therefore a variable label. +// ClusterManager, but constant over the lifetime of an instance.) Then there is +// a variable label "host", since we want to partition the collected metrics by +// host. Since all Descs created in this way are consistent across instances, +// with a guaranteed distinction by the "zone" label, we can register different +// ClusterManager instances with the same registry. func NewClusterManager(zone string) *ClusterManager { return &ClusterManager{ Zone: zone, - OOMCount: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: "clustermanager", - Name: "oom_count", - Help: "number of OOM crashes", - ConstLabels: prometheus.Labels{"zone": zone}, - }, + OOMCountDesc: prometheus.NewDesc( + "clustermanager_oom_crashes_total", + "Number of OOM crashes.", []string{"host"}, + prometheus.Labels{"zone": zone}, ), - RAMUsage: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Subsystem: "clustermanager", - Name: "ram_usage_bytes", - Help: "RAM usage as reported to the cluster manager", - ConstLabels: prometheus.Labels{"zone": zone}, - }, + RAMUsageDesc: prometheus.NewDesc( + "clustermanager_ram_usage_bytes", + "RAM usage as reported to the cluster manager.", []string{"host"}, + prometheus.Labels{"zone": zone}, ), } } -func ExampleCollector_clustermanager() { +func ExampleCollector() { workerDB := NewClusterManager("db") workerCA := NewClusterManager("ca") - prometheus.MustRegister(workerDB) - prometheus.MustRegister(workerCA) // Since we are dealing with custom Collector implementations, it might - // be a good idea to enable the collect checks in the registry. - prometheus.EnableCollectChecks(true) + // be a good idea to try it out with a pedantic registry. + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(workerDB) + reg.MustRegister(workerCA) } diff --git a/prometheus/example_memstats_test.go b/prometheus/example_memstats_test.go deleted file mode 100644 index a84d072..0000000 --- a/prometheus/example_memstats_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus_test - -import ( - "runtime" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - allocDesc = prometheus.NewDesc( - prometheus.BuildFQName("", "memstats", "alloc_bytes"), - "bytes allocated and still in use", - nil, nil, - ) - totalAllocDesc = prometheus.NewDesc( - prometheus.BuildFQName("", "memstats", "total_alloc_bytes"), - "bytes allocated (even if freed)", - nil, nil, - ) - numGCDesc = prometheus.NewDesc( - prometheus.BuildFQName("", "memstats", "num_gc_total"), - "number of GCs run", - nil, nil, - ) -) - -// MemStatsCollector is an example for a custom Collector that solves the -// problem of feeding into multiple metrics at the same time. The -// runtime.ReadMemStats should happen only once, and then the results need to be -// fed into a number of separate Metrics. In this example, only a few of the -// values reported by ReadMemStats are used. For each, there is a Desc provided -// as a var, so the MemStatsCollector itself needs nothing else in the -// struct. Only the methods need to be implemented. -type MemStatsCollector struct{} - -// Describe just sends the three Desc objects for the Metrics we intend to -// collect. -func (_ MemStatsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- allocDesc - ch <- totalAllocDesc - ch <- numGCDesc -} - -// Collect does the trick by calling ReadMemStats once and then constructing -// three different Metrics on the fly. -func (_ MemStatsCollector) Collect(ch chan<- prometheus.Metric) { - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - ch <- prometheus.MustNewConstMetric( - allocDesc, - prometheus.GaugeValue, - float64(ms.Alloc), - ) - ch <- prometheus.MustNewConstMetric( - totalAllocDesc, - prometheus.GaugeValue, - float64(ms.TotalAlloc), - ) - ch <- prometheus.MustNewConstMetric( - numGCDesc, - prometheus.CounterValue, - float64(ms.NumGC), - ) - // To avoid new allocations on each collection, you could also keep - // metric objects around and return the same objects each time, just - // with new values set. -} - -func ExampleCollector_memstats() { - prometheus.MustRegister(&MemStatsCollector{}) - // Since we are dealing with custom Collector implementations, it might - // be a good idea to enable the collect checks in the registry. - prometheus.EnableCollectChecks(true) -} diff --git a/prometheus/example_selfcollector_test.go b/prometheus/example_selfcollector_test.go deleted file mode 100644 index 608deeb..0000000 --- a/prometheus/example_selfcollector_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "runtime" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -func NewCallbackMetric(desc *prometheus.Desc, callback func() float64) *CallbackMetric { - result := &CallbackMetric{desc: desc, callback: callback} - result.Init(result) // Initialize the SelfCollector. 
- return result -} - -// TODO: Come up with a better example. - -// CallbackMetric is an example for a user-defined Metric that exports the -// result of a function call as a metric of type "untyped" without any -// labels. It uses SelfCollector to turn the Metric into a Collector so that it -// can be registered with Prometheus. -// -// Note that this example is pretty much academic as the prometheus package -// already provides an UntypedFunc type. -type CallbackMetric struct { - prometheus.SelfCollector - - desc *prometheus.Desc - callback func() float64 -} - -func (cm *CallbackMetric) Desc() *prometheus.Desc { - return cm.desc -} - -func (cm *CallbackMetric) Write(m *dto.Metric) error { - m.Untyped = &dto.Untyped{Value: proto.Float64(cm.callback())} - return nil -} - -func ExampleSelfCollector() { - m := NewCallbackMetric( - prometheus.NewDesc( - "runtime_goroutines_count", - "Total number of goroutines that currently exist.", - nil, nil, // No labels, these must be nil. - ), - func() float64 { - return float64(runtime.NumGoroutine()) - }, - ) - prometheus.MustRegister(m) -} diff --git a/prometheus/examples_test.go b/prometheus/examples_test.go index 4863b95..f87f21a 100644 --- a/prometheus/examples_test.go +++ b/prometheus/examples_test.go @@ -14,15 +14,16 @@ package prometheus_test import ( + "bytes" "fmt" "math" "net/http" - "os" "runtime" "sort" - "time" + "strings" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" "github.com/golang/protobuf/proto" @@ -49,10 +50,10 @@ func ExampleGauge() { func ExampleGaugeVec() { opsQueued := prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: "our_company", - Subsystem: "blob_storage", - Name: "ops_queued", - Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", }, []string{ // Which user has requested the operation? @@ -122,8 +123,8 @@ func ExampleCounter() { func ExampleCounterVec() { httpReqs := prometheus.NewCounterVec( prometheus.CounterOpts{ - Name: "http_requests_total", - Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", }, []string{"code", "method"}, ) @@ -387,89 +388,90 @@ func ExampleSummaryVec() { temps.WithLabelValues("leiopelma-hochstetteri") // Just for demonstration, let's check the state of the summary vector - // by (ab)using its Collect method and the Write method of its elements - // (which is usually only used by Prometheus internally - code like the - // following will never appear in your own code). - metricChan := make(chan prometheus.Metric) - go func() { - defer close(metricChan) - temps.Collect(metricChan) - }() + // by registering it with a custom registry and then let it collect the + // metrics. + reg := prometheus.NewRegistry() + reg.MustRegister(temps) - metricStrings := []string{} - for metric := range metricChan { - dtoMetric := &dto.Metric{} - metric.Write(dtoMetric) - metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric)) + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") } - sort.Strings(metricStrings) // For reproducible print order. 
- fmt.Println(metricStrings) + fmt.Println(proto.MarshalTextString(metricFamilies[0])) // Output: - // [label: < - // name: "species" - // value: "leiopelma-hochstetteri" - // > - // summary: < - // sample_count: 0 - // sample_sum: 0 - // quantile: < - // quantile: 0.5 - // value: nan + // name: "pond_temperature_celsius" + // help: "The temperature of the frog pond." + // type: SUMMARY + // metric: < + // label: < + // name: "species" + // value: "leiopelma-hochstetteri" // > - // quantile: < - // quantile: 0.9 - // value: nan - // > - // quantile: < - // quantile: 0.99 - // value: nan + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > // > // > - // label: < - // name: "species" - // value: "lithobates-catesbeianus" - // > - // summary: < - // sample_count: 1000 - // sample_sum: 31956.100000000017 - // quantile: < - // quantile: 0.5 - // value: 32.4 + // metric: < + // label: < + // name: "species" + // value: "lithobates-catesbeianus" // > - // quantile: < - // quantile: 0.9 - // value: 41.4 - // > - // quantile: < - // quantile: 0.99 - // value: 41.9 + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > // > // > - // label: < - // name: "species" - // value: "litoria-caerulea" - // > - // summary: < - // sample_count: 1000 - // sample_sum: 29969.50000000001 - // quantile: < - // quantile: 0.5 - // value: 31.1 + // metric: < + // label: < + // name: "species" + // value: "litoria-caerulea" // > - // quantile: < - // quantile: 0.9 - // value: 41.3 - // > - // quantile: < - // quantile: 0.99 - // value: 41.9 + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > // > // > - // ] } -func ExampleConstSummary() { +func ExampleNewConstSummary() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A summary of the HTTP request durations.", @@ -565,7 +567,7 @@ func ExampleHistogram() { // > } -func ExampleConstHistogram() { +func ExampleNewConstHistogram() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A histogram of the HTTP request durations.", @@ -623,18 +625,127 @@ func ExampleConstHistogram() { // > } -func ExamplePushCollectors() { - hostname, _ := os.Hostname() - completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "db_backup_last_completion_time", - Help: "The timestamp of the last succesful completion of a DB backup.", +func ExampleAlreadyRegisteredError() { + reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "requests_total", + Help: "The total number of requests served.", }) - completionTime.Set(float64(time.Now().Unix())) - if err := prometheus.PushCollectors( - "db_backup", hostname, - "http://pushgateway:9091", - completionTime, - ); err != nil { - fmt.Println("Could not push completion time to Pushgateway:", err) + if err := prometheus.Register(reqCounter); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + // A counter for that metric has been registered before. + // Use the old counter from now on. 
+ reqCounter = are.ExistingCollector.(prometheus.Counter) + } else { + // Something else went wrong! + panic(err) + } } } + +func ExampleGatherers() { + reg := prometheus.NewRegistry() + temp := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "temperature_kelvin", + Help: "Temperature in Kelvin.", + }, + []string{"location"}, + ) + reg.MustRegister(temp) + temp.WithLabelValues("outside").Set(273.14) + temp.WithLabelValues("inside").Set(298.44) + + var parser expfmt.TextParser + + text := ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +temperature_kelvin{location="somewhere else"} 4.5 +` + + parseText := func() ([]*dto.MetricFamily, error) { + parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) + if err != nil { + return nil, err + } + var result []*dto.MetricFamily + for _, mf := range parsed { + result = append(result, mf) + } + return result, nil + } + + gatherers := prometheus.Gatherers{ + reg, + prometheus.GathererFunc(parseText), + } + + gathering, err := gatherers.Gather() + if err != nil { + fmt.Println(err) + } + + out := &bytes.Buffer{} + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + fmt.Println("----------") + + // Note how the temperature_kelvin metric family has been merged from + // different sources. Now try + text = ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +# Duplicate metric: +temperature_kelvin{location="outside"} 265.3 + # Wrong labels: +temperature_kelvin 4.5 +` + + gathering, err = gatherers.Gather() + if err != nil { + fmt.Println(err) + } + // Note that still as many metrics as possible are returned: + out.Reset() + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + + // Output: + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 + // temperature_kelvin{location="somewhere else"} 4.5 + // ---------- + // 2 error(s) occurred: + // * collected metric temperature_kelvin label: gauge: was collected before with the same name and label values + // * collected metric temperature_kelvin gauge: has label dimensions inconsistent with previously collected metrics in the same metric family + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. 
+ // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 +} diff --git a/prometheus/expvar.go b/prometheus/expvar_collector.go similarity index 81% rename from prometheus/expvar.go rename to prometheus/expvar_collector.go index 0f7630d..18a99d5 100644 --- a/prometheus/expvar.go +++ b/prometheus/expvar_collector.go @@ -18,21 +18,21 @@ import ( "expvar" ) -// ExpvarCollector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the ExpvarCollector is inherently -// slow. Thus, the ExpvarCollector is probably great for experiments and -// prototying, but you should seriously consider a more direct implementation of -// Prometheus metrics for monitoring production systems. -// -// Use NewExpvarCollector to create new instances. -type ExpvarCollector struct { +type expvarCollector struct { exports map[string]*Desc } -// NewExpvarCollector returns a newly allocated ExpvarCollector that still has -// to be registered with the Prometheus registry. +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. // // The exports map has the following meaning: // @@ -59,21 +59,21 @@ type ExpvarCollector struct { // sample values. // // Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { - return &ExpvarCollector{ +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ exports: exports, } } // Describe implements Collector. -func (e *ExpvarCollector) Describe(ch chan<- *Desc) { +func (e *expvarCollector) Describe(ch chan<- *Desc) { for _, desc := range e.exports { ch <- desc } } // Collect implements Collector. -func (e *ExpvarCollector) Collect(ch chan<- Metric) { +func (e *expvarCollector) Collect(ch chan<- Metric) { for name, desc := range e.exports { var m Metric expVar := expvar.Get(name) diff --git a/prometheus/expvar_test.go b/prometheus/expvar_collector_test.go similarity index 100% rename from prometheus/expvar_test.go rename to prometheus/expvar_collector_test.go diff --git a/prometheus/go_collector.go b/prometheus/go_collector.go index b0d4fb9..abc9d4e 100644 --- a/prometheus/go_collector.go +++ b/prometheus/go_collector.go @@ -17,7 +17,7 @@ type goCollector struct { // NewGoCollector returns a collector which exports metrics about the current // go process. 
-func NewGoCollector() *goCollector { +func NewGoCollector() Collector { return &goCollector{ goroutines: NewGauge(GaugeOpts{ Namespace: "go", diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 7a68910..11a9083 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -51,11 +51,11 @@ type Histogram interface { // bucket of a histogram ("le" -> "less or equal"). const bucketLabel = "le" +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. var ( - // DefBuckets are the default Histogram buckets. The default buckets are - // tailored to broadly measure the response time (in seconds) of a - // network service. Most likely, however, you will be required to define - // buckets customized to your use case. DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} errBucketLabelNotAllowed = fmt.Errorf( @@ -210,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr // Finally we know the final length of h.upperBounds and can make counts. h.counts = make([]uint64, len(h.upperBounds)) - h.Init(h) // Init self-collection. + h.init(h) // Init self-collection. return h } @@ -222,7 +222,7 @@ type histogram struct { sumBits uint64 count uint64 - SelfCollector + selfCollector // Note that there is no mutex required. desc *Desc diff --git a/prometheus/http.go b/prometheus/http.go index e078e3e..67ee5ac 100644 --- a/prometheus/http.go +++ b/prometheus/http.go @@ -15,14 +15,114 @@ package prometheus import ( "bufio" + "bytes" + "compress/gzip" + "fmt" "io" "net" "net/http" "strconv" "strings" + "sync" "time" + + "github.com/prometheus/common/expfmt" ) +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is non instrumented). +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. 
+func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + var instLabels = []string{"method", "code"} type nower interface { @@ -58,7 +158,7 @@ func nowSeries(t ...time.Time) nower { // value. http_requests_total is a metric vector partitioned by HTTP method // (label name "method") and HTTP status code (label name "code"). // -// Note that InstrumentHandler has several issues: +// Deprecated: InstrumentHandler has several issues: // // - It uses Summaries rather than Histograms. Summaries are not useful if // aggregation across multiple instances is required. @@ -73,8 +173,8 @@ func nowSeries(t ...time.Time) nower { // performing such writes. // // Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Consider this function -// DEPRECATED and prefer direct instrumentation in the meantime. +// handlers that are more flexible and have fewer issues. Please prefer direct +// instrumentation in the meantime. func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) } @@ -82,6 +182,9 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun // InstrumentHandlerFunc wraps the given function for instrumentation. It // otherwise works in the same way as InstrumentHandler (and shares the same // issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { return InstrumentHandlerFuncWithOpts( SummaryOpts{ @@ -117,6 +220,9 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri // cannot use SummaryOpts. 
Instead, a CounterOpts struct is created internally, // and all its fields are set to the equally named fields in the provided // SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) } @@ -125,6 +231,9 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand // the same issues) but provides more flexibility (at the cost of a more complex // call syntax). See InstrumentHandlerWithOpts for details how the provided // SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { reqCnt := NewCounterVec( CounterOpts{ diff --git a/prometheus/metric.go b/prometheus/metric.go index 86fd81c..d4063d9 100644 --- a/prometheus/metric.go +++ b/prometheus/metric.go @@ -22,10 +22,8 @@ import ( const separatorByte byte = 255 // A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementers of Metric in this package inclued Gauge, Counter, -// Untyped, and Summary. Users can implement their own Metric types, but that -// should be rarely needed. See the example for SelfCollector, which is also an -// example for a user-implemented Metric. +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. type Metric interface { // Desc returns the descriptor for the Metric. This method idempotently // returns the same descriptor throughout the lifetime of the @@ -36,21 +34,23 @@ type Metric interface { // Write encodes the Metric into a "Metric" Protocol Buffer data // transmission object. // - // Implementers of custom Metric types must observe concurrency safety - // as reads of this metric may occur at any time, and any blocking - // occurs at the expense of total performance of rendering all - // registered metrics. Ideally Metric implementations should support - // concurrent readers. + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. // - // The Prometheus client library attempts to minimize memory allocations - // and will provide a pre-existing reset dto.Metric pointer. Prometheus - // may recycle the dto.Metric proto message, so Metric implementations - // should just populate the provided dto.Metric and then should not keep - // any reference to it. - // - // While populating dto.Metric, labels must be sorted lexicographically. - // (Implementers may find LabelPairSorter useful for that.) + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. 
Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". } // Opts bundles the options for creating most Metric types. Each metric diff --git a/prometheus/process_collector.go b/prometheus/process_collector.go index d8cf0ed..e31e62e 100644 --- a/prometheus/process_collector.go +++ b/prometheus/process_collector.go @@ -28,7 +28,7 @@ type processCollector struct { // NewProcessCollector returns a collector which exports the current state of // process metrics including cpu, memory and file descriptor usage as well as // the process start time for the given process id under the given namespace. -func NewProcessCollector(pid int, namespace string) *processCollector { +func NewProcessCollector(pid int, namespace string) Collector { return NewProcessCollectorPIDFn( func() (int, error) { return pid, nil }, namespace, @@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector { func NewProcessCollectorPIDFn( pidFn func() (int, error), namespace string, -) *processCollector { +) Collector { c := processCollector{ pidFn: pidFn, collectFn: func(chan<- Metric) {}, diff --git a/prometheus/process_collector_test.go b/prometheus/process_collector_test.go index f00e91e..d3362da 100644 --- a/prometheus/process_collector_test.go +++ b/prometheus/process_collector_test.go @@ -1,13 +1,12 @@ package prometheus import ( - "io/ioutil" - "net/http" - "net/http/httptest" + "bytes" "os" "regexp" "testing" + "github.com/prometheus/common/expfmt" "github.com/prometheus/procfs" ) @@ -16,21 +15,26 @@ func TestProcessCollector(t *testing.T) { t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) } - registry := newRegistry() - registry.Register(NewProcessCollector(os.Getpid(), "")) - registry.Register(NewProcessCollectorPIDFn( - func() (int, error) { return os.Getpid(), nil }, "foobar")) + registry := NewRegistry() + if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { + t.Fatal(err) + } + if err := registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar"), + ); err != nil { + t.Fatal(err) + } - s := httptest.NewServer(InstrumentHandler("prometheus", registry)) - defer s.Close() - r, err := http.Get(s.URL) + mfs, err := registry.Gather() if err != nil { t.Fatal(err) } - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Fatal(err) + + var buf bytes.Buffer + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { + t.Fatal(err) + } } for _, re := range []*regexp.Regexp{ @@ -47,8 +51,8 @@ func TestProcessCollector(t *testing.T) { regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), } { - if !re.Match(body) { - t.Errorf("want body to match %s\n%s", re, body) + if !re.Match(buf.Bytes()) { + t.Errorf("want body to match %s\n%s", re, buf.String()) } } } diff --git a/prometheus/promhttp/http.go b/prometheus/promhttp/http.go new file mode 100644 index 0000000..3116d2f --- /dev/null +++ b/prometheus/promhttp/http.go @@ -0,0 +1,201 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +// Package promhttp contains functions to create http.Handler instances to +// expose Prometheus metrics via HTTP. In later versions of this package, it +// will also contain tooling to instrument instances of http.Handler and +// http.RoundTripper. +// +// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor, +// you can create a handler for a custom registry or anything that implements +// the Gatherer interface. It also allows to create handlers that act +// differently on errors or allow to log errors. +package promhttp + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The +// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP +// error, no error logging, and compression if requested by the client. +// +// If you want to create a Handler for the DefaultGatherer with different +// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and +// your desired HandlerOpts. +func Handler() http.Handler { + return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) +} + +// HandlerFor returns an http.Handler for the provided Gatherer. The behavior +// ef the Handler is defined by the provided HandlerOpts. 
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			case HTTPErrorOnError:
+				http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+				return
+			}
+		}
+
+		contentType := expfmt.Negotiate(req.Header)
+		buf := getBuf()
+		defer giveBuf(buf)
+		writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+		enc := expfmt.NewEncoder(writer, contentType)
+		var lastErr error
+		for _, mf := range mfs {
+			if err := enc.Encode(mf); err != nil {
+				lastErr = err
+				if opts.ErrorLog != nil {
+					opts.ErrorLog.Println("error encoding metric family:", err)
+				}
+				switch opts.ErrorHandling {
+				case PanicOnError:
+					panic(err)
+				case ContinueOnError:
+					// Handled later.
+				case HTTPErrorOnError:
+					http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+					return
+				}
+			}
+		}
+		if closer, ok := writer.(io.Closer); ok {
+			closer.Close()
+		}
+		if lastErr != nil && buf.Len() == 0 {
+			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
+			return
+		}
+		header := w.Header()
+		header.Set(contentTypeHeader, string(contentType))
+		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+		if encoding != "" {
+			header.Set(contentEncodingHeader, encoding)
+		}
+		w.Write(buf.Bytes())
+		// TODO(beorn7): Consider streaming serving of metrics.
+	})
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// Serve an HTTP status code 500 upon the first error
+	// encountered. Report the error message in the body.
+	HTTPErrorOnError HandlerErrorHandling = iota
+	// Ignore errors and try to serve as many metrics as possible. However,
+	// if no metrics can be served, serve an HTTP status code 500 and the
+	// last error message in the body. Only use this in deliberate "best
+	// effort" metrics collection scenarios. It is recommended to at least
+	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
+	// errors completely.
+	ContinueOnError
+	// Panic upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler.
+// The zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+	// ErrorLog specifies an optional logger for errors collecting and
+	// serving metrics. If nil, errors are not logged at all.
+	ErrorLog Logger
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling provided ErrorLog
+	// is not nil.
+ ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} diff --git a/prometheus/promhttp/http_test.go b/prometheus/promhttp/http_test.go new file mode 100644 index 0000000..d4a7d4a --- /dev/null +++ b/prometheus/promhttp/http_test.go @@ -0,0 +1,137 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package promhttp + +import ( + "bytes" + "errors" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +type errorCollector struct{} + +func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) +} + +func (e errorCollector) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.NewInvalidMetric( + prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), + errors.New("collect error"), + ) +} + +func TestHandlerErrorHandling(t *testing.T) { + + // Create a registry that collects a MetricFamily with two elements, + // another with one, and reports an error. + reg := prometheus.NewRegistry() + + cnt := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "the_count", + Help: "Ah-ah-ah! 
Thunder and lightning!", + }) + reg.MustRegister(cnt) + + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + reg.MustRegister(errorCollector{}) + + logBuf := &bytes.Buffer{} + logger := log.New(logBuf, "", 0) + + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + errorHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: HTTPErrorOnError, + }) + continueHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: ContinueOnError, + }) + panicHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: PanicOnError, + }) + wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantErrorBody := `An error has occurred during metrics gathering: + +error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantOKBody := `# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +# HELP the_count Ah-ah-ah! Thunder and lightning! +# TYPE the_count counter +the_count 0 +` + + errorHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusInternalServerError; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg) + } + if got := writer.Body.String(); got != wantErrorBody { + t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) + } + logBuf.Reset() + writer.Body.Reset() + writer.Code = http.StatusOK + + continueHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message %q, want %q", got, wantMsg) + } + if got := writer.Body.String(); got != wantOKBody { + t.Errorf("got body %q, want %q", got, wantOKBody) + } + + defer func() { + if err := recover(); err == nil { + t.Error("expected panic from panicHandler") + } + }() + panicHandler.ServeHTTP(writer, request) +} diff --git a/prometheus/push.go b/prometheus/push.go deleted file mode 100644 index 5ec0a3a..0000000 --- a/prometheus/push.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. 
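As a usage illustration of the new promhttp package (not part of this change; the registry contents, address, and logger below are assumed for the example), a handler for a custom registry with non-default HandlerOpts might be wired up roughly like this:

package main

import (
	"log"
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry, independent of the default one.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())

	// Serve the custom registry; log errors and keep serving whatever
	// could be gathered instead of failing the whole scrape.
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
		ErrorHandling: promhttp.ContinueOnError,
	})

	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}

ContinueOnError combined with an ErrorLog serves partial results without silently masking collection errors.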
- -package prometheus - -// Push triggers a metric collection by the default registry and pushes all -// collected metrics to the Pushgateway specified by url. See the Pushgateway -// documentation for detailed implications of the job and instance -// parameter. instance can be left empty. You can use just host:port or ip:port -// as url, in which case 'http://' is added automatically. You can also include -// the schema in the URL. However, do not include the '/metrics/jobs/...' part. -// -// Note that all previously pushed metrics with the same job and instance will -// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT' -// to push to the Pushgateway.) -func Push(job, instance, url string) error { - return defRegistry.Push(job, instance, url, "PUT") -} - -// PushAdd works like Push, but only previously pushed metrics with the same -// name (and the same job and instance) will be replaced. (It uses HTTP method -// 'POST' to push to the Pushgateway.) -func PushAdd(job, instance, url string) error { - return defRegistry.Push(job, instance, url, "POST") -} - -// PushCollectors works like Push, but it does not collect from the default -// registry. Instead, it collects from the provided collectors. It is a -// convenient way to push only a few metrics. -func PushCollectors(job, instance, url string, collectors ...Collector) error { - return pushCollectors(job, instance, url, "PUT", collectors...) -} - -// PushAddCollectors works like PushAdd, but it does not collect from the -// default registry. Instead, it collects from the provided collectors. It is a -// convenient way to push only a few metrics. -func PushAddCollectors(job, instance, url string, collectors ...Collector) error { - return pushCollectors(job, instance, url, "POST", collectors...) -} - -func pushCollectors(job, instance, url, method string, collectors ...Collector) error { - r := newRegistry() - for _, collector := range collectors { - if _, err := r.Register(collector); err != nil { - return err - } - } - return r.Push(job, instance, url, method) -} diff --git a/prometheus/push/examples_test.go b/prometheus/push/examples_test.go new file mode 100644 index 0000000..7f17ca2 --- /dev/null +++ b/prometheus/push/examples_test.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package push_test
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/push"
+)
+
+func ExampleCollectors() {
+	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "db_backup_last_completion_timestamp_seconds",
+		Help: "The timestamp of the last successful completion of a DB backup.",
+	})
+	completionTime.Set(float64(time.Now().Unix()))
+	if err := push.Collectors(
+		"db_backup", push.HostnameGroupingKey(),
+		"http://pushgateway:9091",
+		completionTime,
+	); err != nil {
+		fmt.Println("Could not push completion time to Pushgateway:", err)
+	}
+}
+
+func ExampleRegistry() {
+	registry := prometheus.NewRegistry()
+
+	completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name: "db_backup_last_completion_timestamp_seconds",
+		Help: "The timestamp of the last successful completion of a DB backup.",
+	})
+	registry.MustRegister(completionTime)
+
+	completionTime.Set(float64(time.Now().Unix()))
+	if err := push.FromGatherer(
+		"db_backup", push.HostnameGroupingKey(),
+		"http://pushgateway:9091",
+		registry,
+	); err != nil {
+		fmt.Println("Could not push completion time to Pushgateway:", err)
+	}
+}
diff --git a/prometheus/push/push.go b/prometheus/push/push.go
new file mode 100644
index 0000000..ae40402
--- /dev/null
+++ b/prometheus/push/push.go
@@ -0,0 +1,172 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package push provides functions to push metrics to a Pushgateway. The metrics
+// to push are either collected from a provided registry, or from explicitly
+// listed collectors.
+//
+// See the documentation of the Pushgateway to understand the meaning of the
+// grouping parameters and the differences between push.FromGatherer and
+// push.Collectors on the one hand and push.AddFromGatherer and
+// push.AddCollectors on the other hand: https://github.com/prometheus/pushgateway
+package push
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const contentTypeHeader = "Content-Type"
+
+// FromGatherer triggers a metric collection by the provided Gatherer (which is
+// usually implemented by a prometheus.Registry) and pushes all gathered metrics
+// to the Pushgateway specified by url, using the provided job name and the
+// (optional) further grouping labels (the grouping map may be nil). See the
+// Pushgateway documentation for detailed implications of the job and other
+// grouping labels. Neither the job name nor any grouping label value may
+// contain a "/". The metrics pushed must not contain a job label of their own
+// nor any of the grouping labels.
+// +// You can use just host:port or ip:port as url, in which case 'http://' is +// added automatically. You can also include the schema in the URL. However, do +// not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and other grouping +// labels will be replaced with the metrics pushed by this call. (It uses HTTP +// method 'PUT' to push to the Pushgateway.) +func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "PUT") +} + +// AddFromGatherer works like FromGatherer, but only previously pushed metrics +// with the same name (and the same job and other grouping labels) will be +// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) +func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "POST") +} + +func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + if strings.HasSuffix(pushURL, "/") { + pushURL = pushURL[:len(pushURL)-1] + } + + if strings.Contains(job, "/") { + return fmt.Errorf("job contains '/': %s", job) + } + urlComponents := []string{url.QueryEscape(job)} + for ln, lv := range grouping { + if !model.LabelNameRE.MatchString(ln) { + return fmt.Errorf("grouping label has invalid name: %s", ln) + } + if strings.Contains(lv, "/") { + return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) + } + urlComponents = append(urlComponents, ln, lv) + } + pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) + + mfs, err := g.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} + +// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, +// it collects from the provided collectors directly. It is a convenient way to +// push only a few metrics. +func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "PUT", collectors...) +} + +// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. +// Instead, it collects from the provided collectors directly. It is a +// convenient way to push only a few metrics. 
+func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "POST", collectors...) +} + +func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { + r := prometheus.NewRegistry() + for _, collector := range collectors { + if err := r.Register(collector); err != nil { + return err + } + } + return push(job, grouping, url, r, method) +} + +// HostnameGroupingKey returns a label map with the only entry +// {instance=""}. This can be conveniently used as the grouping +// parameter if metrics should be pushed with the hostname as label. The +// returned map is created upon each call so that the caller is free to add more +// labels to the map. +func HostnameGroupingKey() map[string]string { + hostname, err := os.Hostname() + if err != nil { + return map[string]string{"instance": "unknown"} + } + return map[string]string{"instance": hostname} +} diff --git a/prometheus/push/push_test.go b/prometheus/push/push_test.go new file mode 100644 index 0000000..28ed9b7 --- /dev/null +++ b/prometheus/push/push_test.go @@ -0,0 +1,176 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package push + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestPush(t *testing.T) { + + var ( + lastMethod string + lastBody []byte + lastPath string + ) + + host, err := os.Hostname() + if err != nil { + t.Error(err) + } + + // Fake a Pushgateway that always responds with 202. + pgwOK := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lastMethod = r.Method + var err error + lastBody, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + lastPath = r.URL.EscapedPath() + w.Header().Set("Content-Type", `text/plain; charset=utf-8`) + w.WriteHeader(http.StatusAccepted) + }), + ) + defer pgwOK.Close() + + // Fake a Pushgateway that always responds with 500. 
+ pgwErr := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "fake error", http.StatusInternalServerError) + }), + ) + defer pgwErr.Close() + + metric1 := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "testname1", + Help: "testhelp1", + }) + metric2 := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testname2", + Help: "testhelp2", + ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"}, + }) + + reg := prometheus.NewRegistry() + reg.MustRegister(metric1) + reg.MustRegister(metric2) + + mfs, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + t.Fatal(err) + } + } + wantBody := buf.Bytes() + + // PushCollectors, all good. + if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for PushCollectors, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob/instance/"+host { + t.Error("unexpected path:", lastPath) + } + + // PushAddCollectors, with nil grouping, all good. + if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POST for PushAddCollectors, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob" { + t.Error("unexpected path:", lastPath) + } + + // PushCollectors with a broken PGW. + if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push to broken Pushgateway succeeded") + } else { + if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { + t.Errorf("got error %q, want %q", got, want) + } + } + + // PushCollectors with invalid grouping or job. + if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with grouping contained in metrics succeeded") + } + if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with invalid job value succeeded") + } + if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with invalid grouping succeeded") + } + if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with invalid grouping succeeded") + } + + // Push registry, all good. + if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for Push, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + + // PushAdd registry, all good. 
+ if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POSTT for PushAdd, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { + t.Error("unexpected path:", lastPath) + } +} diff --git a/prometheus/registry.go b/prometheus/registry.go index 00c184b..32a3986 100644 --- a/prometheus/registry.go +++ b/prometheus/registry.go @@ -11,224 +11,287 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - package prometheus import ( "bytes" - "compress/gzip" "errors" "fmt" - "io" - "net/http" - "net/url" "os" "sort" - "strings" "sync" "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" ) -var ( - defRegistry = newDefaultRegistry() - errAlreadyReg = errors.New("duplicate metrics collector registration attempted") -) - -// Constants relevant to the HTTP interface. const ( - // APIVersion is the version of the format of the exported data. This - // will match this library's version, which subscribes to the Semantic - // Versioning scheme. - APIVersion = "0.0.4" - - // DelimitedTelemetryContentType is the content type set on telemetry - // data responses in delimited protobuf format. - DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` - // TextTelemetryContentType is the content type set on telemetry data - // responses in text format. - TextTelemetryContentType = `text/plain; version=` + APIVersion - // ProtoTextTelemetryContentType is the content type set on telemetry - // data responses in protobuf text format. (Only used for debugging.) - ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` - // ProtoCompactTextTelemetryContentType is the content type set on - // telemetry data responses in protobuf compact text format. (Only used - // for debugging.) - ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` - - // Constants for object pools. - numBufs = 4 - numMetricFamilies = 1000 - numMetrics = 10000 - // Capacity for the channel to collect metrics and descriptors. capMetricChan = 1000 capDescChan = 10 - - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - - acceptEncodingHeader = "Accept-Encoding" - acceptHeader = "Accept" ) -// Handler returns the HTTP handler for the global Prometheus registry. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). Usually the handler is used to handle the "/metrics" endpoint. -// -// Please note the issues described in the doc comment of InstrumentHandler. You -// might want to consider using UninstrumentedHandler instead. 
-func Handler() http.Handler { - return InstrumentHandler("prometheus", defRegistry) +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) } -// UninstrumentedHandler works in the same way as Handler, but the returned HTTP -// handler is not instrumented. This is useful if no instrumentation is desired -// (for whatever reason) or if the instrumentation has to happen with a -// different handler name (or with a different instrumentation approach -// altogether). See the InstrumentHandler example. -func UninstrumentedHandler() http.Handler { - return defRegistry -} - -// Register registers a new Collector to be included in metrics collection. It -// returns an error if the descriptors provided by the Collector are invalid or -// if they - in combination with descriptors of already registered Collectors - -// do not fulfill the consistency and uniqueness criteria described in the Desc -// documentation. -// -// Do not register the same Collector multiple times concurrently. (Registering -// the same Collector twice would result in an error anyway, but on top of that, -// it is not safe to do so concurrently.) -func Register(m Collector) error { - _, err := defRegistry.Register(m) - return err -} - -// MustRegister works like Register but panics where Register would have -// returned an error. MustRegister is also Variadic, where Register only -// accepts a single Collector to register. -func MustRegister(m ...Collector) { - for i := range m { - if err := Register(m[i]); err != nil { - panic(err) - } +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, } } -// RegisterOrGet works like Register but does not return an error if a Collector -// is registered that equals a previously registered Collector. (Two Collectors -// are considered equal if their Describe method yields the same set of -// descriptors.) Instead, the previously registered Collector is returned (which -// is helpful if the new and previously registered Collectors are equal but not -// identical, i.e. not pointers to the same object). +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. // -// As for Register, it is still not safe to call RegisterOrGet with the same -// Collector multiple times concurrently. 
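For orientation, a minimal sketch of how the convenience functions and the default instances fit together (the metric name and port are invented for the example): a Collector registered via MustRegister ends up in DefaultRegisterer, and promhttp.Handler serves whatever DefaultGatherer gathers.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// requestsTotal is an illustrative metric, not part of this change.
var requestsTotal = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "myapp_requests_total",
	Help: "Total number of handled requests.",
})

func init() {
	// Shortcut for DefaultRegisterer.MustRegister(requestsTotal).
	prometheus.MustRegister(requestsTotal)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		requestsTotal.Inc()
		w.Write([]byte("ok"))
	})
	// promhttp.Handler serves what the DefaultGatherer gathers, which also
	// includes the process and Go collectors registered by this package.
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}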
-func RegisterOrGet(m Collector) (Collector, error) { - return defRegistry.RegisterOrGet(m) +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r } -// MustRegisterOrGet works like RegisterOrGet but panics where RegisterOrGet -// would have returned an error. -func MustRegisterOrGet(m Collector) Collector { - existing, err := RegisterOrGet(m) +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather then the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // It is in general not safe to register the same Collector multiple + // times concurrently. + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. 
Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// RegisterOrGet registers the provided Collector with the DefaultRegisterer and +// returns the Collector, unless an equal Collector was registered before, in +// which case that Collector is returned. +// +// Deprecated: RegisterOrGet is merely a convenience function for the +// implementation as described in the documentation for +// AlreadyRegisteredError. As the use case is relatively rare, this function +// will be removed in a future version of this package to clean up the +// namespace. +func RegisterOrGet(c Collector) (Collector, error) { + if err := Register(c); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + return are.ExistingCollector, nil + } + return nil, err + } + return c, nil +} + +// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning +// an error. +// +// Deprecated: This is deprecated for the same reason RegisterOrGet is. See +// there for details. +func MustRegisterOrGet(c Collector) Collector { + c, err := RegisterOrGet(c) if err != nil { panic(err) } - return existing + return c } -// Unregister unregisters the Collector that equals the Collector passed in as -// an argument. (Two Collectors are considered equal if their Describe method -// yields the same set of descriptors.) The function returns whether a Collector -// was unregistered. +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. func Unregister(c Collector) bool { - return defRegistry.Unregister(c) + return DefaultRegisterer.Unregister(c) } -// SetMetricFamilyInjectionHook sets a function that is called whenever metrics -// are collected. The hook function must be set before metrics collection begins -// (i.e. call SetMetricFamilyInjectionHook before setting the HTTP handler.) The -// MetricFamily protobufs returned by the hook function are merged with the -// metrics collected in the usual way. +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. 
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that +// gathers from the previous DefaultGatherers but then merges the MetricFamily +// protobufs returned from the provided hook function with the MetricFamily +// protobufs returned from the original DefaultGatherer. // -// This is a way to directly inject MetricFamily protobufs managed and owned by -// the caller. The caller has full responsibility. As no registration of the -// injected metrics has happened, there is no descriptor to check against, and -// there are no registration-time checks. If collect-time checks are disabled -// (see function EnableCollectChecks), no sanity checks are performed on the -// returned protobufs at all. If collect-checks are enabled, type and uniqueness -// checks are performed, but no further consistency checks (which would require -// knowledge of a metric descriptor). -// -// Sorting concerns: The caller is responsible for sorting the label pairs in -// each metric. However, the order of metrics will be sorted by the registry as -// it is required anyway after merging with the metric families collected -// conventionally. -// -// The function must be callable at any time and concurrently. +// Deprecated: This function manipulates the DefaultGatherer variable. Consider +// the implications, i.e. don't do this concurrently with any uses of the +// DefaultGatherer. In the rare cases where you need to inject MetricFamily +// protobufs directly, it is recommended to use a custom Registry and combine it +// with a custom Gatherer using the Gatherers type (see +// there). SetMetricFamilyInjectionHook only exists for compatibility reasons +// with previous versions of this package. func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - defRegistry.metricFamilyInjectionHook = hook + DefaultGatherer = Gatherers{ + DefaultGatherer, + GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), + } } -// PanicOnCollectError sets the behavior whether a panic is caused upon an error -// while metrics are collected and served to the HTTP endpoint. By default, an -// internal server error (status code 500) is served with an error message. -func PanicOnCollectError(b bool) { - defRegistry.panicOnCollectError = b +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector } -// EnableCollectChecks enables (or disables) additional consistency checks -// during metrics collection. These additional checks are not enabled by default -// because they inflict a performance penalty and the errors they check for can -// only happen if the used Metric and Collector types have internal programming -// errors. It can be helpful to enable these checks while working with custom -// Collectors or Metrics whose correctness is not well established yet. 
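A rough sketch of the pattern the AlreadyRegisteredError documentation describes (the metric and helper name are hypothetical): attempt registration and, on an AlreadyRegisteredError, fall back to the previously registered, equal Collector.

package main

import "github.com/prometheus/client_golang/prometheus"

// getOrRegisterCounter is a hypothetical helper; the metric name is invented.
func getOrRegisterCounter() prometheus.Counter {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "my_requests_total",
		Help: "Total requests handled.",
	})
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// An equal Counter was registered before; keep using it.
			return are.ExistingCollector.(prometheus.Counter)
		}
		// Inconsistent re-registration or invalid descriptors.
		panic(err)
	}
	return c
}

func main() {
	getOrRegisterCounter().Inc()
	getOrRegisterCounter().Inc() // second call reuses the existing Counter
}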
-func EnableCollectChecks(b bool) { - defRegistry.collectChecksEnabled = b +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" } -// encoder is a function that writes a dto.MetricFamily to an io.Writer in a -// certain encoding. It returns the number of bytes written and any error -// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText -// are encoders. -type encoder func(io.Writer, *dto.MetricFamily) (int, error) +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error -type registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - bufPool chan *bytes.Buffer - metricFamilyPool chan *dto.MetricFamily - metricPool chan *dto.Metric - metricFamilyInjectionHook func() []*dto.MetricFamily - - panicOnCollectError, collectChecksEnabled bool +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() } -func (r *registry) Register(c Collector) (Collector, error) { - descChan := make(chan *Desc, capDescChan) +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) go func() { c.Describe(descChan) close(descChan) }() - - newDescIDs := map[uint64]struct{}{} - newDimHashesByName := map[string]uint64{} - var collectorID uint64 // Just a sum of all desc IDs. - var duplicateDescErr error - r.mtx.Lock() defer r.mtx.Unlock() // Coduct various tests... @@ -236,7 +299,7 @@ func (r *registry) Register(c Collector) (Collector, error) { // Is the descriptor valid at all? if desc.err != nil { - return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) } // Is the descID unique? @@ -257,13 +320,13 @@ func (r *registry) Register(c Collector) (Collector, error) { // First check existing descriptors... 
if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { - return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } } else { // ...then check the new descriptors already seen. if dimHash, exists := newDimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { - return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } } else { newDimHashesByName[desc.fqName] = desc.dimHash @@ -272,15 +335,18 @@ func (r *registry) Register(c Collector) (Collector, error) { } // Did anything happen at all? if len(newDescIDs) == 0 { - return nil, errors.New("collector has no descriptors") + return errors.New("collector has no descriptors") } if existing, exists := r.collectorsByID[collectorID]; exists { - return existing, errAlreadyReg + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } } // If the collectorID is new, but at least one of the descs existed // before, we are in trouble. if duplicateDescErr != nil { - return nil, duplicateDescErr + return duplicateDescErr } // Only after all tests have passed, actually register. @@ -291,26 +357,20 @@ func (r *registry) Register(c Collector) (Collector, error) { for name, dimHash := range newDimHashesByName { r.dimHashesByName[name] = dimHash } - return c, nil + return nil } -func (r *registry) RegisterOrGet(m Collector) (Collector, error) { - existing, err := r.Register(m) - if err != nil && err != errAlreadyReg { - return nil, err - } - return existing, nil -} - -func (r *registry) Unregister(c Collector) bool { - descChan := make(chan *Desc, capDescChan) +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) go func() { c.Describe(descChan) close(descChan) }() - - descIDs := map[uint64]struct{}{} - var collectorID uint64 // Just a sum of the desc IDs. for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { collectorID += desc.id @@ -337,72 +397,25 @@ func (r *registry) Unregister(c Collector) bool { return true } -func (r *registry) Push(job, instance, pushURL, method string) error { - if !strings.Contains(pushURL, "://") { - pushURL = "http://" + pushURL - } - if strings.HasSuffix(pushURL, "/") { - pushURL = pushURL[:len(pushURL)-1] - } - pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) - if instance != "" { - pushURL += "/instances/" + url.QueryEscape(instance) - } - buf := r.getBuf() - defer r.giveBuf(buf) - if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { - if r.panicOnCollectError { +// MustRegister implements Registerer. 
+func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { panic(err) } - return err } - req, err := http.NewRequest(method, pushURL, buf) - if err != nil { - return err - } - req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 202 { - return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) - } - return nil } -func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { - contentType := expfmt.Negotiate(req.Header) - buf := r.getBuf() - defer r.giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { - if r.panicOnCollectError { - panic(err) - } - http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) -} - -func (r *registry) writePB(encoder expfmt.Encoder) error { - var metricHashes map[uint64]struct{} - if r.collectChecksEnabled { - metricHashes = make(map[uint64]struct{}) - } - metricChan := make(chan Metric, capMetricChan) - wg := sync.WaitGroup{} +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) r.mtx.RLock() metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) @@ -420,6 +433,16 @@ func (r *registry) writePB(encoder expfmt.Encoder) error { collector.Collect(metricChan) }(collector) } + + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + r.mtx.RUnlock() // Drain metricChan in case of premature return. @@ -434,269 +457,189 @@ func (r *registry) writePB(encoder expfmt.Encoder) error { // of metricFamiliesByName (and of metricHashes if checks are // enabled). Most likely not worth it. desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + errs = append(errs, fmt.Errorf( + "error collecting metric %v: %s", desc, err, + )) + continue + } metricFamily, ok := metricFamiliesByName[desc.fqName] - if !ok { - metricFamily = r.getMetricFamily() - defer r.giveMetricFamily(metricFamily) + if ok { + if metricFamily.GetHelp() != desc.help { + errs = append(errs, fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + )) + continue + } + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + )) + continue + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} metricFamily.Name = proto.String(desc.fqName) metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + errs = append(errs, fmt.Errorf( + "empty metric collected: %s", dtoMetric, + )) + continue + } metricFamiliesByName[desc.fqName] = metricFamily } - dtoMetric := r.getMetric() - defer r.giveMetric(dtoMetric) - if err := metric.Write(dtoMetric); err != nil { - // TODO: Consider different means of error reporting so - // that a single erroneous metric could be skipped - // instead of blowing up the whole collection. - return fmt.Errorf("error collecting metric %v: %s", desc, err) + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue } - switch { - case metricFamily.Type != nil: - // Type already set. We are good. - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if r.collectChecksEnabled { - if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { - return err + if r.pedanticChecksEnabled { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + errs = append(errs, fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + )) + continue + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + errs = append(errs, err) + continue } } metricFamily.Metric = append(metricFamily.Metric, dtoMetric) } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} - if r.metricFamilyInjectionHook != nil { - for _, mf := range r.metricFamilyInjectionHook() { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if !exists { - metricFamiliesByName[mf.GetName()] = mf - if r.collectChecksEnabled { - for _, m := range mf.Metric { - if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { - return err - } - } +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calles are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + errs MultiError // The collected errors to return in the end. 
+ ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) } - continue + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { - if r.collectChecksEnabled { - if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { - return err - } + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue } existingMF.Metric = append(existingMF.Metric, m) } } } - - // Now that MetricFamilies are all set, sort their Metrics - // lexicographically by their label values. - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - - // Write out MetricFamilies sorted by their name. - names := make([]string, 0, len(metricFamiliesByName)) - for name := range metricFamiliesByName { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - if err := encoder.Encode(metricFamiliesByName[name]); err != nil { - return err - } - } - return nil -} - -func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { - - // Type consistency with metric family. - if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), - ) - } - - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. Label pairs must be sorted by contract. But the point of this - // method is to check for contract violations. So we better do the sort - // now. - sort.Sort(LabelPairSorter(dtoMetric.Label)) - for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - } - if _, exists := metricHashes[h]; exists { - return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, - ) - } - metricHashes[h] = struct{}{} - - if desc == nil { - return nil // Nothing left to check if we have no desc. 
- } - - // Desc consistency with metric family. - if metricFamily.GetName() != desc.fqName { - return fmt.Errorf( - "collected metric %s %s has name %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, - ) - } - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - - r.mtx.RLock() // Remaining checks need the read lock. - defer r.mtx.RUnlock() - - // Is the desc registered? - if _, exist := r.descIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - - return nil -} - -func (r *registry) getBuf() *bytes.Buffer { - select { - case buf := <-r.bufPool: - return buf - default: - return &bytes.Buffer{} - } -} - -func (r *registry) giveBuf(buf *bytes.Buffer) { - buf.Reset() - select { - case r.bufPool <- buf: - default: - } -} - -func (r *registry) getMetricFamily() *dto.MetricFamily { - select { - case mf := <-r.metricFamilyPool: - return mf - default: - return &dto.MetricFamily{} - } -} - -func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { - mf.Reset() - select { - case r.metricFamilyPool <- mf: - default: - } -} - -func (r *registry) getMetric() *dto.Metric { - select { - case m := <-r.metricPool: - return m - default: - return &dto.Metric{} - } -} - -func (r *registry) giveMetric(m *dto.Metric) { - m.Reset() - select { - case r.metricPool <- m: - default: - } -} - -func newRegistry() *registry { - return ®istry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - bufPool: make(chan *bytes.Buffer, numBufs), - metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), - metricPool: make(chan *dto.Metric, numMetrics), - } -} - -func newDefaultRegistry() *registry { - r := newRegistry() - r.Register(NewProcessCollector(os.Getpid(), "")) - r.Register(NewGoCollector()) - return r -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). 
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
- header := request.Header.Get(acceptEncodingHeader)
- parts := strings.Split(header, ",")
- for _, part := range parts {
- part := strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return gzip.NewWriter(writer), "gzip"
- }
- }
- return writer, ""
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
 }
 
+// metricSorter is a sortable slice of *dto.Metric.
 type metricSorter []*dto.Metric
 
 func (s metricSorter) Len() int {
@@ -739,3 +682,125 @@ func (s metricSorter) Less(i, j int) bool {
 }
 return s[i].GetTimestampMs() < s[j].GetTimestampMs()
 }
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/prometheus/registry_test.go b/prometheus/registry_test.go index f30c90c..9dacb62 100644 --- a/prometheus/registry_test.go +++ b/prometheus/registry_test.go @@ -17,41 +17,30 @@ // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file. 
-package prometheus +package prometheus_test import ( "bytes" - "encoding/binary" "net/http" + "net/http/httptest" "testing" - "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" ) -type fakeResponseWriter struct { - header http.Header - body bytes.Buffer -} - -func (r *fakeResponseWriter) Header() http.Header { - return r.header -} - -func (r *fakeResponseWriter) Write(d []byte) (l int, err error) { - return r.body.Write(d) -} - -func (r *fakeResponseWriter) WriteHeader(c int) { -} - func testHandler(t testing.TB) { - metricVec := NewCounterVec( - CounterOpts{ + metricVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ Name: "name", Help: "docstring", - ConstLabels: Labels{"constname": "constvalue"}, + ConstLabels: prometheus.Labels{"constname": "constvalue"}, }, []string{"labelname"}, ) @@ -59,8 +48,6 @@ func testHandler(t testing.TB) { metricVec.WithLabelValues("val1").Inc() metricVec.WithLabelValues("val2").Inc() - varintBuf := make([]byte, binary.MaxVarintLen32) - externalMetricFamily := &dto.MetricFamily{ Name: proto.String("externalname"), Help: proto.String("externaldocstring"), @@ -83,18 +70,9 @@ func testHandler(t testing.TB) { }, }, } - marshaledExternalMetricFamily, err := proto.Marshal(externalMetricFamily) - if err != nil { - t.Fatal(err) - } - var externalBuf bytes.Buffer - l := binary.PutUvarint(varintBuf, uint64(len(marshaledExternalMetricFamily))) - _, err = externalBuf.Write(varintBuf[:l]) - if err != nil { - t.Fatal(err) - } - _, err = externalBuf.Write(marshaledExternalMetricFamily) - if err != nil { + externalBuf := &bytes.Buffer{} + enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) + if err := enc.Encode(externalMetricFamily); err != nil { t.Fatal(err) } externalMetricFamilyAsBytes := externalBuf.Bytes() @@ -160,18 +138,9 @@ metric: < }, }, } - marshaledExpectedMetricFamily, err := proto.Marshal(expectedMetricFamily) - if err != nil { - t.Fatal(err) - } - var buf bytes.Buffer - l = binary.PutUvarint(varintBuf, uint64(len(marshaledExpectedMetricFamily))) - _, err = buf.Write(varintBuf[:l]) - if err != nil { - t.Fatal(err) - } - _, err = buf.Write(marshaledExpectedMetricFamily) - if err != nil { + buf := &bytes.Buffer{} + enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + if err := enc.Encode(expectedMetricFamily); err != nil { t.Fatal(err) } expectedMetricFamilyAsBytes := buf.Bytes() @@ -216,7 +185,7 @@ metric: < externalMetricFamilyWithSameName := &dto.MetricFamily{ Name: proto.String("name"), - Help: proto.String("inconsistent help string does not matter here"), + Help: proto.String("docstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { @@ -248,7 +217,7 @@ metric: < var scenarios = []struct { headers map[string]string out output - collector Collector + collector prometheus.Collector externalMF []*dto.MetricFamily }{ { // 0 @@ -485,21 +454,22 @@ metric: < }, } for i, scenario := range scenarios { - registry := newRegistry() - registry.collectChecksEnabled = true + registry := prometheus.NewPedanticRegistry() + gatherer := prometheus.Gatherer(registry) + if scenario.externalMF != nil { + gatherer = prometheus.Gatherers{ + registry, + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { + return scenario.externalMF, nil + }), + } + } if scenario.collector != nil { registry.Register(scenario.collector) } - 
if scenario.externalMF != nil {
- registry.metricFamilyInjectionHook = func() []*dto.MetricFamily {
- return scenario.externalMF
- }
- }
- writer := &fakeResponseWriter{
- header: http.Header{},
- }
- handler := InstrumentHandler("prometheus", registry)
+ writer := httptest.NewRecorder()
+ handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
 request, _ := http.NewRequest("GET", "/", nil)
 for key, value := range scenario.headers {
 request.Header.Add(key, value)
@@ -507,7 +477,7 @@ metric: <
 handler(writer, request)
 
 for key, value := range scenario.out.headers {
- if writer.Header().Get(key) != value {
+ if writer.HeaderMap.Get(key) != value {
 t.Errorf(
 "%d. expected %q for header %q, got %q",
 i, value, key, writer.Header().Get(key),
@@ -515,10 +485,10 @@ metric: <
 }
 }
 
- if !bytes.Equal(scenario.out.body, writer.body.Bytes()) {
+ if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) {
 t.Errorf(
- "%d. expected %q for body, got %q",
- i, scenario.out.body, writer.body.Bytes(),
+ "%d. expected body:\n%s\ngot body:\n%s\n",
+ i, scenario.out.body, writer.Body.Bytes(),
 )
 }
 }
@@ -533,3 +503,43 @@ func BenchmarkHandler(b *testing.B) {
 testHandler(b)
 }
 }
+
+func TestRegisterWithOrGet(t *testing.T) {
+ // Replace the default registerer just to be sure. This is bad, but this
+ // whole test will go away once RegisterOrGet is removed.
+ oldRegisterer := prometheus.DefaultRegisterer
+ defer func() {
+ prometheus.DefaultRegisterer = oldRegisterer
+ }()
+ prometheus.DefaultRegisterer = prometheus.NewRegistry()
+ original := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "test",
+ Help: "help",
+ },
+ []string{"foo", "bar"},
+ )
+ equalButNotSame := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "test",
+ Help: "help",
+ },
+ []string{"foo", "bar"},
+ )
+ if err := prometheus.Register(original); err != nil {
+ t.Fatal(err)
+ }
+ if err := prometheus.Register(equalButNotSame); err == nil {
+ t.Fatal("expected error when registering an equal collector")
+ }
+ existing, err := prometheus.RegisterOrGet(equalButNotSame)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if existing != original {
+ t.Error("expected original collector but got something else")
+ }
+ if existing == equalButNotSame {
+ t.Error("expected original collector but got new one")
+ }
+}
diff --git a/prometheus/summary.go b/prometheus/summary.go
index eb84961..fda213c 100644
--- a/prometheus/summary.go
+++ b/prometheus/summary.go
@@ -53,8 +53,8 @@ type Summary interface {
 Observe(float64)
 }
 
+// DefObjectives are the default Summary quantile values.
 var (
- // DefObjectives are the default Summary quantile values.
 DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
 
 errQuantileLabelNotAllowed = fmt.Errorf(
@@ -139,11 +139,11 @@ type SummaryOpts struct {
 BufCap uint32
 }
 
-// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
-// method of perk/quantile is actually not working as advertised - and it might
-// be unfixable, as the underlying algorithm is apparently not capable of
-// merging summaries in the first place. To avoid using Merge, we are currently
-// adding observations to _each_ age bucket, i.e. the effort to add a sample is
+// Great fuck-up with the sliding-window decay algorithm... The Merge method of
+// perk/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place.
To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is // essentially multiplied by the number of age buckets. When rotating age // buckets, we empty the previous head stream. On scrape time, we simply take // the quantiles from the head stream (no merging required). Result: More effort @@ -227,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { } sort.Float64s(s.sortedObjectives) - s.Init(s) // Init self-collection. + s.init(s) // Init self-collection. return s } type summary struct { - SelfCollector + selfCollector bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. mtx sync.Mutex // Protects every other moving part. diff --git a/prometheus/summary_test.go b/prometheus/summary_test.go index 0790cdf..c4575ff 100644 --- a/prometheus/summary_test.go +++ b/prometheus/summary_test.go @@ -329,8 +329,8 @@ func TestSummaryDecay(t *testing.T) { } func getBounds(vars []float64, q, ε float64) (min, max float64) { - // TODO: This currently tolerates an error of up to 2*ε. The error must - // be at most ε, but for some reason, it's sometimes slightly + // TODO(beorn7): This currently tolerates an error of up to 2*ε. The + // error must be at most ε, but for some reason, it's sometimes slightly // higher. That's a bug. n := float64(len(vars)) lower := int((q - 2*ε) * n) diff --git a/prometheus/value.go b/prometheus/value.go index b54ac11..a944c37 100644 --- a/prometheus/value.go +++ b/prometheus/value.go @@ -48,7 +48,7 @@ type value struct { // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG valBits uint64 - SelfCollector + selfCollector desc *Desc valType ValueType @@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin valBits: math.Float64bits(val), labelPairs: makeLabelPairs(desc, labelValues), } - result.Init(result) + result.init(result) return result } @@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error { // library to back the implementations of CounterFunc, GaugeFunc, and // UntypedFunc. type valueFunc struct { - SelfCollector + selfCollector desc *Desc valType ValueType @@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val function: function, labelPairs: makeLabelPairs(desc, nil), } - result.Init(result) + result.init(result) return result }
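
Usage sketch for the gathering API introduced above: the Gatherers and GathererFunc types documented in the registry.go hunk, combined with the promhttp handler used in the updated registry_test.go. This is a minimal, hedged example, not part of the patch; the metric names (`example_requests_total`, `external_events_total`) and the listen address are made up for illustration.

```go
package main

import (
	"log"
	"net/http"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A pedantic registry additionally runs the per-Desc checks added in
	// the registry.go changes above.
	reg := prometheus.NewPedanticRegistry()

	// Illustrative metric; name and label are invented for this sketch.
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total",
			Help: "Requests handled, partitioned by handler.",
		},
		[]string{"handler"},
	)
	if err := reg.Register(requests); err != nil {
		log.Fatal(err)
	}
	requests.WithLabelValues("index").Inc()

	// A custom Gatherer that simply returns an already existing
	// MetricFamily protobuf; no Collector registration is involved.
	injected := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
		return []*dto.MetricFamily{
			{
				Name: proto.String("external_events_total"),
				Help: proto.String("Events reported by an external source."),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{
					{Counter: &dto.Counter{Value: proto.Float64(42)}},
				},
			},
		}, nil
	})

	// Gatherers merges both sources at gather time; duplicates or
	// inconsistencies are dropped and reported via a flattened MultiError.
	gatherer := prometheus.Gatherers{reg, injected}

	http.Handle("/metrics", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Read this as the replacement pattern for the removed metricFamilyInjectionHook: instead of hooking into the registry, an extra Gatherer returns the existing MetricFamily protobufs and Gatherers merges them with the registry's output when Gather is called.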