Update Collector example

It now uses the new WrapWith function instead of ConstLabels. Describe
is now implemented via DescribeByCollect.

Signed-off-by: beorn7 <beorn@soundcloud.com>
beorn7 committed 2018-09-11 23:31:49 +02:00
parent 84d7aa0cd9
commit 837c7cb1f4
3 changed files with 55 additions and 42 deletions

prometheus/collector.go

@@ -81,6 +81,8 @@ type Collector interface {
 // it might even get registered as an unchecked Collecter (cf. the Register
 // method of the Registerer interface). Hence, only use this shortcut
 // implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
 func DescribeByCollect(c Collector, descs chan<- *Desc) {
 	metrics := make(chan Metric)
 	go func() {
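For readers who do not want to open the example file, here is a minimal, self-contained sketch of the pattern the new doc comment points to: a custom Collector whose Describe simply delegates to DescribeByCollect. It is not part of this commit; the queueCollector type, the metric name, and the queueLength helper are invented purely for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueLengthDesc is the only descriptor this collector ever uses, which is
// what makes the DescribeByCollect shortcut safe here.
var queueLengthDesc = prometheus.NewDesc(
	"example_queue_length",
	"Current length of a hypothetical work queue.",
	nil, nil,
)

type queueCollector struct{}

// Describe delegates to DescribeByCollect: Collect always returns the same
// single metric with the same descriptor, so the contract is fulfilled.
func (qc queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(qc, ch)
}

func (qc queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		queueLengthDesc, prometheus.GaugeValue, float64(queueLength()),
	)
}

// queueLength stands in for querying whatever system is being instrumented.
func queueLength() int { return 42 }

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(queueCollector{})
}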

prometheus/example_clustermanager_test.go

@@ -17,18 +17,18 @@ import "github.com/prometheus/client_golang/prometheus"
 
 // ClusterManager is an example for a system that might have been built without
 // Prometheus in mind. It models a central manager of jobs running in a
-// cluster. To turn it into something that collects Prometheus metrics, we
-// simply add the two methods required for the Collector interface.
+// cluster. Thus, we implement a custom Collector called
+// ClusterManagerCollector, which collects information from a ClusterManager
+// using its provided methods and turns them into Prometheus Metrics for
+// collection.
 //
 // An additional challenge is that multiple instances of the ClusterManager are
 // run within the same binary, each in charge of a different zone. We need to
-// make use of ConstLabels to be able to register each ClusterManager instance
-// with Prometheus.
+// make use of wrapping Registerers to be able to register each
+// ClusterManagerCollector instance with Prometheus.
 type ClusterManager struct {
 	Zone string
-	OOMCountDesc *prometheus.Desc
-	RAMUsageDesc *prometheus.Desc
-	// ... many more fields
+	// Contains many more fields not listed in this example.
 }
 
 // ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
@@ -50,10 +50,30 @@ func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
 	return
 }
 
-// Describe simply sends the two Descs in the struct to the channel.
-func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
-	ch <- c.OOMCountDesc
-	ch <- c.RAMUsageDesc
+// ClusterManagerCollector implements the Collector interface.
+type ClusterManagerCollector struct {
+	ClusterManager *ClusterManager
+}
+
+// Descriptors used by the ClusterManagerCollector below.
+var (
+	oomCountDesc = prometheus.NewDesc(
+		"clustermanager_oom_crashes_total",
+		"Number of OOM crashes.",
+		[]string{"host"}, nil,
+	)
+	ramUsageDesc = prometheus.NewDesc(
+		"clustermanager_ram_usage_bytes",
+		"RAM usage as reported to the cluster manager.",
+		[]string{"host"}, nil,
+	)
+)
+
+// Describe is implemented with DescribeByCollect. That's possible because the
+// Collect method will always return the same two metrics with the same two
+// descriptors.
+func (cc ClusterManagerCollector) Describe(ch chan<- *prometheus.Desc) {
+	prometheus.DescribeByCollect(cc, ch)
 }
 
 // Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
@@ -61,11 +81,11 @@ func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
 //
 // Note that Collect could be called concurrently, so we depend on
 // ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
-func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
-	oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()
+func (cc ClusterManagerCollector) Collect(ch chan<- prometheus.Metric) {
+	oomCountByHost, ramUsageByHost := cc.ClusterManager.ReallyExpensiveAssessmentOfTheSystemState()
 	for host, oomCount := range oomCountByHost {
 		ch <- prometheus.MustNewConstMetric(
-			c.OOMCountDesc,
+			oomCountDesc,
 			prometheus.CounterValue,
 			float64(oomCount),
 			host,
@@ -73,7 +93,7 @@ func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
 	}
 	for host, ramUsage := range ramUsageByHost {
 		ch <- prometheus.MustNewConstMetric(
-			c.RAMUsageDesc,
+			ramUsageDesc,
 			prometheus.GaugeValue,
 			ramUsage,
 			host,
@@ -81,38 +101,27 @@ func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
 	}
 }
 
-// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
-// that the zone is set as a ConstLabel. (It's different in each instance of the
-// ClusterManager, but constant over the lifetime of an instance.) Then there is
-// a variable label "host", since we want to partition the collected metrics by
-// host. Since all Descs created in this way are consistent across instances,
-// with a guaranteed distinction by the "zone" label, we can register different
-// ClusterManager instances with the same registry.
-func NewClusterManager(zone string) *ClusterManager {
-	return &ClusterManager{
+// NewClusterManager first creates a Prometheus-ignorant ClusterManager
+// instance. Then, it creates a ClusterManagerCollector for the just created
+// ClusterManager. Finally, it registers the ClusterManagerCollector with a
+// wrapping Registerer that adds the zone as a label. In this way, the metrics
+// collected by different ClusterManagerCollectors do not collide.
+func NewClusterManager(zone string, reg prometheus.Registerer) *ClusterManager {
+	c := &ClusterManager{
 		Zone: zone,
-		OOMCountDesc: prometheus.NewDesc(
-			"clustermanager_oom_crashes_total",
-			"Number of OOM crashes.",
-			[]string{"host"},
-			prometheus.Labels{"zone": zone},
-		),
-		RAMUsageDesc: prometheus.NewDesc(
-			"clustermanager_ram_usage_bytes",
-			"RAM usage as reported to the cluster manager.",
-			[]string{"host"},
-			prometheus.Labels{"zone": zone},
-		),
 	}
+	cc := ClusterManagerCollector{ClusterManager: c}
+	prometheus.WrapWith(prometheus.Labels{"zone": zone}, reg).MustRegister(cc)
+	return c
 }
 
 func ExampleCollector() {
-	workerDB := NewClusterManager("db")
-	workerCA := NewClusterManager("ca")
-
 	// Since we are dealing with custom Collector implementations, it might
 	// be a good idea to try it out with a pedantic registry.
 	reg := prometheus.NewPedanticRegistry()
-	reg.MustRegister(workerDB)
-	reg.MustRegister(workerCA)
+
+	// Construct cluster managers. In real code, we would assign them to
+	// variables to then do something with them.
+	NewClusterManager("db", reg)
+	NewClusterManager("ca", reg)
 }
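The reworked ExampleCollector stops after registering the two cluster managers and produces no output. To actually look at the wrapped metrics, something like the following sketch can be used. It is not part of the commit; it assumes the expfmt package from github.com/prometheus/common and that NewClusterManager from the example above is available in the same package.

package main

import (
	"log"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

// dumpMetrics prints everything the Gatherer currently exposes in the text
// exposition format.
func dumpMetrics(g prometheus.Gatherer) error {
	mfs, err := g.Gather()
	if err != nil {
		return err
	}
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtText)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	reg := prometheus.NewPedanticRegistry()
	// As in the example, each cluster manager registers its collector with
	// reg itself, through a Registerer wrapped with its zone label.
	NewClusterManager("db", reg)
	NewClusterManager("ca", reg)
	if err := dumpMetrics(reg); err != nil {
		log.Fatal(err)
	}
}

Every series gathered this way carries both the variable host label from the descriptors and the zone label added by the wrapping Registerer, which is why the two ClusterManagerCollectors do not collide.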

prometheus/wrap.go

@@ -30,6 +30,8 @@ import (
 //
 // WrapWith provides a way to add fixed labels to a subset of Collectors. It
 // should not be used to add fixed labels to all metrics exposed.
+//
+// The Collector example demonstrates a use of WrapWith.
 func WrapWith(labels Labels, reg Registerer) Registerer {
 	return &wrappingRegisterer{
 		wrappedRegisterer: reg,
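To make the "subset of Collectors" remark concrete, here is a small sketch, again not part of the commit and with invented metric and label names: one counter is registered directly and stays unlabeled, while another is registered through a WrapWith-wrapped Registerer and therefore carries a fixed component label.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Registered directly: no extra label.
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "app_requests_total",
		Help: "Total requests handled.",
	})
	reg.MustRegister(requests)

	// Registered through a wrapping Registerer: every metric from the
	// Collectors registered here carries component="cache" as an
	// additional fixed label.
	cacheReg := prometheus.WrapWith(prometheus.Labels{"component": "cache"}, reg)
	cacheHits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "app_cache_hits_total",
		Help: "Total cache hits.",
	})
	cacheReg.MustRegister(cacheHits)

	requests.Inc()
	cacheHits.Inc()
}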