Merge pull request #13 from matttproud/feature/labels/base-exposed

Update client to reflect new API needs.

This commit is contained in:

commit 5a29c27491

README.md (15 lines changed)

@@ -1,3 +1,18 @@
# Major Notes

The project's documentation is *not up-to-date due to rapidly-changing
requirements* that have quieted down in the interim, but the overall API should
be stable for several months even if things change under the hood.

An update to reflect the current state is pending. Key changes for current
users:

1. The code has been qualified in production environments.
2. Label-oriented metric exposition and registration, including docstrings.
3. Deprecation of gocheck in favor of native table-driven tests.
4. The best way to get a handle on this is to look at the examples.

Barring that, the antique documentation is below:

# Overview

This [Go](http://golang.org) package is an extraction of a piece of
instrumentation code I whipped-up for a personal project that a friend of mine
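The label-oriented exposition and registration mentioned in point 2 above is easiest to see in the example program further down in this diff. As a quick orientation, here is a minimal sketch of the new registration flow; the import paths and the exact `Register` signature are assumptions drawn from the example code in this commit, not a canonical snippet:

```go
package main

import (
	"time"

	registry "github.com/matttproud/golang_instrumentation" // assumed import path
	"github.com/matttproud/golang_instrumentation/metrics"  // assumed import path
)

func main() {
	// One labeled counter replaces the old per-service foo/bar/zed counters.
	rpcCalls := metrics.NewCounter()

	r := registry.NewRegistry()
	// Register(name, docstring, baseLabels, metric), as used in the example below.
	r.Register("rpc_calls_total", "RPC calls.", registry.NilLabels, rpcCalls)

	for {
		// The label set distinguishes the service at observation time.
		rpcCalls.Increment(map[string]string{"service": "foo"})
		time.Sleep(100 * time.Millisecond)
	}
}
```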
constants.go (28 lines changed)

@@ -1,14 +1,26 @@
/*
Copyright (c) 2013, Matt T. Proud
All rights reserved.

Use of this source code is governed by a BSD-style license that can be found in
the LICENSE file.
*/
// Copyright (c) 2013, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package registry

var (
	// NilLabels is a nil set of labels merely for end-user convenience.
	NilLabels map[string]string
	NilLabels map[string]string = nil

	// A prefix to be used to namespace instrumentation flags from others.
	FlagNamespace = "telemetry."

	// The format of the exported data. This will match this library's version,
	// which subscribes to the Semantic Versioning scheme.
	APIVersion = "0.0.1"

	ProtocolVersionHeader = "X-Prometheus-API-Version"

	baseLabelsKey = "baseLabels"
	docstringKey  = "docstring"
	metricKey     = "metric"
	nameLabel     = "name"
)

@@ -37,46 +37,29 @@ func init() {
func main() {
	flag.Parse()

	foo_rpc_latency := metrics.CreateHistogram(&metrics.HistogramSpecification{
	rpc_latency := metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 200, 4),
		BucketMaker:           metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	})
	foo_rpc_calls := &metrics.CounterMetric{}
	bar_rpc_latency := metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 200, 4),
		BucketMaker:           metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	})
	bar_rpc_calls := &metrics.CounterMetric{}
	zed_rpc_latency := metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 200, 4),
		BucketMaker:           metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	})
	zed_rpc_calls := &metrics.CounterMetric{}

	rpc_calls := metrics.NewCounter()

	metrics := registry.NewRegistry()

	nilBaseLabels := make(map[string]string)

	metrics.Register("rpc_latency_foo_microseconds", "RPC latency for foo service.", nilBaseLabels, foo_rpc_latency)
	metrics.Register("rpc_calls_foo_total", "RPC calls for foo service.", nilBaseLabels, foo_rpc_calls)
	metrics.Register("rpc_latency_bar_microseconds", "RPC latency for bar service.", nilBaseLabels, bar_rpc_latency)
	metrics.Register("rpc_calls_bar_total", "RPC calls for bar service.", nilBaseLabels, bar_rpc_calls)
	metrics.Register("rpc_latency_zed_microseconds", "RPC latency for zed service.", nilBaseLabels, zed_rpc_latency)
	metrics.Register("rpc_calls_zed_total", "RPC calls for zed service.", nilBaseLabels, zed_rpc_calls)
	metrics.Register("rpc_latency_microseconds", "RPC latency.", registry.NilLabels, rpc_latency)
	metrics.Register("rpc_calls_total", "RPC calls.", registry.NilLabels, rpc_calls)

	go func() {
		for {
			foo_rpc_latency.Add(rand.Float64() * 200)
			foo_rpc_calls.Increment()
			rpc_latency.Add(map[string]string{"service": "foo"}, rand.Float64()*200)
			rpc_calls.Increment(map[string]string{"service": "foo"})

			bar_rpc_latency.Add((rand.NormFloat64() * 10.0) + 100.0)
			bar_rpc_calls.Increment()
			rpc_latency.Add(map[string]string{"service": "bar"}, (rand.NormFloat64()*10.0)+100.0)
			rpc_calls.Increment(map[string]string{"service": "bar"})

			zed_rpc_latency.Add(rand.ExpFloat64())
			zed_rpc_calls.Increment()
			rpc_latency.Add(map[string]string{"service": "zed"}, rand.ExpFloat64())
			rpc_calls.Increment(map[string]string{"service": "zed"})

			time.Sleep(100 * time.Millisecond)
		}

@@ -20,11 +20,11 @@ import (
)

type AccumulatingBucket struct {
	observations   int
	elements       utility.PriorityQueue
	evictionPolicy EvictionPolicy
	maximumSize    int
	mutex          sync.RWMutex
	evictionPolicy EvictionPolicy
	observations   int
}

/*
@@ -35,9 +35,9 @@ behavior set.
func AccumulatingBucketBuilder(evictionPolicy EvictionPolicy, maximumSize int) BucketBuilder {
	return func() Bucket {
		return &AccumulatingBucket{
			maximumSize:    maximumSize,
			evictionPolicy: evictionPolicy,
			elements:       make(utility.PriorityQueue, 0, maximumSize),
			evictionPolicy: evictionPolicy,
			maximumSize:    maximumSize,
		}
	}
}

@@ -54,8 +54,8 @@ func (b *AccumulatingBucket) Add(value float64) {
	size := len(b.elements)

	v := utility.Item{
		Value:    value,
		Priority: -1 * time.Now().UnixNano(),
		Value:    value,
	}

	if size == b.maximumSize {

@@ -69,7 +69,7 @@ func (b *AccumulatingBucket) String() string {
	b.mutex.RLock()
	defer b.mutex.RUnlock()

	buffer := new(bytes.Buffer)
	buffer := &bytes.Buffer{}

	fmt.Fprintf(buffer, "[AccumulatingBucket with %d elements and %d capacity] { ", len(b.elements), b.maximumSize)

@@ -79,7 +79,7 @@ func (b *AccumulatingBucket) String() string {

	fmt.Fprintf(buffer, "}")

	return string(buffer.Bytes())
	return buffer.String()
}

func (b *AccumulatingBucket) ValueForIndex(index int) float64 {

@@ -116,3 +116,14 @@ func (b *AccumulatingBucket) Observations() int {

	return b.observations
}

func (b *AccumulatingBucket) Reset() {
	b.mutex.Lock()
	defer b.mutex.RUnlock()

	for i := 0; i < b.elements.Len(); i++ {
		b.elements.Pop()
	}

	b.observations = 0
}

@@ -29,14 +29,16 @@ type Bucket interface {
	Add a value to the bucket.
	*/
	Add(value float64)
	/*
	Provide a humanized representation hereof.
	*/
	String() string
	/*
	Provide a count of observations throughout the bucket's lifetime.
	*/
	Observations() int
	// Reset is responsible for resetting this bucket back to a pristine state.
	Reset()
	/*
	Provide a humanized representation hereof.
	*/
	String() string
	/*
	Provide the value from the given in-memory value cache or an estimate
	thereof for the given index. The consumer of the bucket's data makes
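The Bucket interface above now includes Reset, and AccumulatingBucketBuilder (in the hunks just before it) returns a BucketBuilder, i.e. a func() Bucket bound to an eviction policy and a maximum size. A minimal hedged sketch of building and exercising one such bucket, using the constructor call that appears in the example program in this commit (import paths assumed):

```go
package main

import (
	"fmt"

	"github.com/matttproud/golang_instrumentation/maths"   // path as used by the example in this commit
	"github.com/matttproud/golang_instrumentation/metrics" // path as used by the example in this commit
)

func main() {
	// BucketBuilder is a func() Bucket; AccumulatingBucketBuilder binds an
	// eviction policy and a maximum size into such a builder.
	bb := metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50)

	b := bb() // each invocation yields a fresh, empty bucket
	b.Add(42.0)
	fmt.Println(b.Observations()) // lifetime observation count
	b.Reset()                     // back to a pristine state, per the new interface method
}
```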

@@ -12,12 +12,13 @@ constants.go provides package-level constants for metrics.
package metrics

const (
	valueKey           = "value"
	gaugeTypeValue     = "gauge"
	counterTypeValue   = "counter"
	typeKey            = "type"
	histogramTypeValue = "histogram"
	floatBitCount      = 64
	floatFormat        = 'f'
	floatPrecision     = 6
	floatBitCount      = 64
	gaugeTypeValue     = "gauge"
	histogramTypeValue = "histogram"
	typeKey            = "type"
	valueKey           = "value"
	labelsKey          = "labels"
)
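The keys above (typeKey, valueKey, and the new labelsKey) and the float-formatting constants determine how metric values are rendered for exposition. The table-driven tests later in this diff expect marshalled counters of the shape {"type":"counter","value":[{"labels":{"handler":"/foo"},"value":19}]}, and formatFloat in the histogram file is strconv.FormatFloat applied with the three float constants. A small self-contained illustration of the latter:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// formatFloat in the histogram file reduces to strconv.FormatFloat with
	// the constants above: floatFormat='f', floatPrecision=6, floatBitCount=64.
	fmt.Println(strconv.FormatFloat(0.5, 'f', 6, 64)) // prints "0.500000"
}
```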
@@ -10,77 +10,145 @@ package metrics
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/matttproud/golang_instrumentation/utility"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type CounterMetric struct {
|
||||
value float64
|
||||
mutex sync.RWMutex
|
||||
// TODO(matt): Refactor to de-duplicate behaviors.
|
||||
|
||||
type Counter interface {
|
||||
AsMarshallable() map[string]interface{}
|
||||
Decrement(labels map[string]string) float64
|
||||
DecrementBy(labels map[string]string, value float64) float64
|
||||
Increment(labels map[string]string) float64
|
||||
IncrementBy(labels map[string]string, value float64) float64
|
||||
ResetAll()
|
||||
Set(labels map[string]string, value float64) float64
|
||||
String() string
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) Set(value float64) float64 {
|
||||
type counterValue struct {
|
||||
labels map[string]string
|
||||
value float64
|
||||
}
|
||||
|
||||
func NewCounter() Counter {
|
||||
return &counter{
|
||||
values: map[string]*counterValue{},
|
||||
}
|
||||
}
|
||||
|
||||
type counter struct {
|
||||
mutex sync.RWMutex
|
||||
values map[string]*counterValue
|
||||
}
|
||||
|
||||
func (metric *counter) Set(labels map[string]string, value float64) float64 {
|
||||
metric.mutex.Lock()
|
||||
defer metric.mutex.Unlock()
|
||||
|
||||
metric.value = value
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
|
||||
return metric.value
|
||||
signature := utility.LabelsToSignature(labels)
|
||||
if original, ok := metric.values[signature]; ok {
|
||||
original.value = value
|
||||
} else {
|
||||
metric.values[signature] = &counterValue{
|
||||
labels: labels,
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) Reset() {
|
||||
metric.Set(0)
|
||||
func (metric *counter) ResetAll() {
|
||||
metric.mutex.Lock()
|
||||
defer metric.mutex.Unlock()
|
||||
|
||||
for key, value := range metric.values {
|
||||
for label := range value.labels {
|
||||
delete(value.labels, label)
|
||||
}
|
||||
delete(metric.values, key)
|
||||
}
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) String() string {
|
||||
formatString := "[CounterMetric; value=%f]"
|
||||
func (metric *counter) String() string {
|
||||
formatString := "[Counter %s]"
|
||||
|
||||
metric.mutex.RLock()
|
||||
defer metric.mutex.RUnlock()
|
||||
|
||||
return fmt.Sprintf(formatString, metric.value)
|
||||
return fmt.Sprintf(formatString, metric.values)
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) IncrementBy(value float64) float64 {
|
||||
func (metric *counter) IncrementBy(labels map[string]string, value float64) float64 {
|
||||
metric.mutex.Lock()
|
||||
defer metric.mutex.Unlock()
|
||||
|
||||
metric.value += value
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
|
||||
return metric.value
|
||||
signature := utility.LabelsToSignature(labels)
|
||||
if original, ok := metric.values[signature]; ok {
|
||||
original.value += value
|
||||
} else {
|
||||
metric.values[signature] = &counterValue{
|
||||
labels: labels,
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) Increment() float64 {
|
||||
return metric.IncrementBy(1)
|
||||
func (metric *counter) Increment(labels map[string]string) float64 {
|
||||
return metric.IncrementBy(labels, 1)
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) DecrementBy(value float64) float64 {
|
||||
func (metric *counter) DecrementBy(labels map[string]string, value float64) float64 {
|
||||
metric.mutex.Lock()
|
||||
defer metric.mutex.Unlock()
|
||||
|
||||
metric.value -= value
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
|
||||
return metric.value
|
||||
signature := utility.LabelsToSignature(labels)
|
||||
if original, ok := metric.values[signature]; ok {
|
||||
original.value -= value
|
||||
} else {
|
||||
metric.values[signature] = &counterValue{
|
||||
labels: labels,
|
||||
value: -1 * value,
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) Decrement() float64 {
|
||||
return metric.DecrementBy(1)
|
||||
func (metric *counter) Decrement(labels map[string]string) float64 {
|
||||
return metric.DecrementBy(labels, 1)
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) Get() float64 {
|
||||
func (metric *counter) AsMarshallable() map[string]interface{} {
|
||||
metric.mutex.RLock()
|
||||
defer metric.mutex.RUnlock()
|
||||
|
||||
return metric.value
|
||||
}
|
||||
|
||||
func (metric *CounterMetric) Marshallable() map[string]interface{} {
|
||||
metric.mutex.RLock()
|
||||
defer metric.mutex.RUnlock()
|
||||
|
||||
v := make(map[string]interface{}, 2)
|
||||
|
||||
v[valueKey] = metric.value
|
||||
v[typeKey] = counterTypeValue
|
||||
|
||||
return v
|
||||
values := make([]map[string]interface{}, 0, len(metric.values))
|
||||
for _, value := range metric.values {
|
||||
values = append(values, map[string]interface{}{
|
||||
labelsKey: value.labels,
|
||||
valueKey: value.value,
|
||||
})
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
valueKey: values,
|
||||
typeKey: counterTypeValue,
|
||||
}
|
||||
}
|
||||
|
|
|
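The counter above keeps one counterValue per distinct label set, keyed by utility.LabelsToSignature(labels). The signature function itself is not part of this diff, so the sketch below uses an illustrative stand-in (a sorted key=value join) purely to show why repeated operations with an equal label map land on the same entry:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// illustrativeSignature is a stand-in for utility.LabelsToSignature, whose
// implementation is not shown in this diff: any deterministic encoding of the
// label set works, as long as equal maps yield equal keys.
func illustrativeSignature(labels map[string]string) string {
	pairs := make([]string, 0, len(labels))
	for k, v := range labels {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func main() {
	a := illustrativeSignature(map[string]string{"service": "foo", "zone": "a"})
	b := illustrativeSignature(map[string]string{"zone": "a", "service": "foo"})
	fmt.Println(a == b) // true: both operations would hit the same counterValue
}
```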
@@ -9,90 +9,215 @@ license that can be found in the LICENSE file.
|
|||
package metrics
|
||||
|
||||
import (
|
||||
. "github.com/matttproud/gocheck"
|
||||
"encoding/json"
|
||||
"github.com/matttproud/golang_instrumentation/utility/test"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (s *S) TestCounterCreate(c *C) {
|
||||
m := CounterMetric{value: 1.0}
|
||||
func testCounter(t test.Tester) {
|
||||
type input struct {
|
||||
steps []func(g Counter)
|
||||
}
|
||||
type output struct {
|
||||
value string
|
||||
}
|
||||
|
||||
c.Assert(m, Not(IsNil))
|
||||
var scenarios = []struct {
|
||||
in input
|
||||
out output
|
||||
}{
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(nil, 1)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{},\"value\":1}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{}, 2)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{},\"value\":2}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{}, 3)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{}, 5)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{},\"value\":5}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 13)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/bar"}, 17)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.ResetAll()
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 19)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":19}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 23)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.Increment(map[string]string{"handler": "/foo"})
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":24}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Increment(map[string]string{"handler": "/foo"})
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":1}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Decrement(map[string]string{"handler": "/foo"})
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":-1}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 29)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.Decrement(map[string]string{"handler": "/foo"})
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":28}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 31)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.IncrementBy(map[string]string{"handler": "/foo"}, 5)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":36}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Counter){
|
||||
func(g Counter) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 37)
|
||||
},
|
||||
func(g Counter) {
|
||||
g.DecrementBy(map[string]string{"handler": "/foo"}, 10)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"counter\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":27}]}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
counter := NewCounter()
|
||||
|
||||
for _, step := range scenario.in.steps {
|
||||
step(counter)
|
||||
}
|
||||
|
||||
marshallable := counter.AsMarshallable()
|
||||
|
||||
bytes, err := json.Marshal(marshallable)
|
||||
if err != nil {
|
||||
t.Errorf("%d. could not marshal into JSON %s", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
asString := string(bytes)
|
||||
|
||||
if scenario.out.value != asString {
|
||||
t.Errorf("%d. expected %q, got %q", i, scenario.out.value, asString)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestCounterGet(c *C) {
|
||||
m := CounterMetric{value: 42.23}
|
||||
|
||||
c.Check(m.Get(), Equals, 42.23)
|
||||
func TestCounter(t *testing.T) {
|
||||
testCounter(t)
|
||||
}
|
||||
|
||||
func (s *S) TestCounterSet(c *C) {
|
||||
m := CounterMetric{value: 42.23}
|
||||
m.Set(40.4)
|
||||
|
||||
c.Check(m.Get(), Equals, 40.4)
|
||||
}
|
||||
|
||||
func (s *S) TestCounterReset(c *C) {
|
||||
m := CounterMetric{value: 42.23}
|
||||
m.Reset()
|
||||
|
||||
c.Check(m.Get(), Equals, 0.0)
|
||||
}
|
||||
|
||||
func (s *S) TestCounterIncrementBy(c *C) {
|
||||
m := CounterMetric{value: 1.0}
|
||||
|
||||
m.IncrementBy(1.5)
|
||||
|
||||
c.Check(m.Get(), Equals, 2.5)
|
||||
c.Check(m.String(), Equals, "[CounterMetric; value=2.500000]")
|
||||
}
|
||||
|
||||
func (s *S) TestCounterIncrement(c *C) {
|
||||
m := CounterMetric{value: 1.0}
|
||||
|
||||
m.Increment()
|
||||
|
||||
c.Check(m.Get(), Equals, 2.0)
|
||||
c.Check(m.String(), Equals, "[CounterMetric; value=2.000000]")
|
||||
}
|
||||
|
||||
func (s *S) TestCounterDecrementBy(c *C) {
|
||||
m := CounterMetric{value: 1.0}
|
||||
|
||||
m.DecrementBy(1.0)
|
||||
|
||||
c.Check(m.Get(), Equals, 0.0)
|
||||
c.Check(m.String(), Equals, "[CounterMetric; value=0.000000]")
|
||||
}
|
||||
|
||||
func (s *S) TestCounterDecrement(c *C) {
|
||||
m := CounterMetric{value: 1.0}
|
||||
|
||||
m.Decrement()
|
||||
|
||||
c.Check(m.Get(), Equals, 0.0)
|
||||
c.Check(m.String(), Equals, "[CounterMetric; value=0.000000]")
|
||||
}
|
||||
|
||||
func (s *S) TestCounterString(c *C) {
|
||||
m := CounterMetric{value: 2.0}
|
||||
c.Check(m.String(), Equals, "[CounterMetric; value=2.000000]")
|
||||
}
|
||||
|
||||
func (s *S) TestCounterMetricMarshallable(c *C) {
|
||||
m := CounterMetric{value: 1.0}
|
||||
|
||||
returned := m.Marshallable()
|
||||
|
||||
c.Assert(returned, Not(IsNil))
|
||||
|
||||
c.Check(returned, HasLen, 2)
|
||||
c.Check(returned["value"], Equals, 1.0)
|
||||
c.Check(returned["type"], Equals, "counter")
|
||||
}
|
||||
|
||||
func (s *S) TestCounterAsMetric(c *C) {
|
||||
var metric Metric = &CounterMetric{value: 1.0}
|
||||
|
||||
c.Assert(metric, Not(IsNil))
|
||||
func BenchmarkCounter(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testCounter(b)
|
||||
}
|
||||
}
|
||||
|
|
|
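The rewritten test above shows the pattern that replaces gocheck throughout this commit: a scenario table driven by a helper that accepts test.Tester, which both the test and the benchmark call. The skeleton below is a reduced sketch of that shape; it substitutes a local one-method interface for test.Tester so it stays self-contained, since test.Tester's definition is not part of this diff:

```go
package metrics

import "testing"

// tester stands in for test.Tester here; *testing.T and *testing.B both
// satisfy it, which is what lets one helper back a test and a benchmark.
type tester interface {
	Errorf(format string, args ...interface{})
}

func testSomething(t tester) {
	scenarios := []struct {
		in  int
		out int
	}{
		{in: 1, out: 1},
		{in: 2, out: 2},
	}

	for i, scenario := range scenarios {
		if scenario.in != scenario.out {
			t.Errorf("%d. expected %d, got %d", i, scenario.out, scenario.in)
		}
	}
}

func TestSomething(t *testing.T) { testSomething(t) }

func BenchmarkSomething(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testSomething(b)
	}
}
```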
@@ -22,8 +22,8 @@ func (s *S) TestEvictOldest(c *C) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
var item utility.Item = utility.Item{
|
||||
Value: float64(i),
|
||||
Priority: int64(i),
|
||||
Value: float64(i),
|
||||
}
|
||||
|
||||
heap.Push(&q, &item)
|
||||
|
@@ -49,8 +49,8 @@ func (s *S) TestEvictAndReplaceWithAverage(c *C) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
var item utility.Item = utility.Item{
|
||||
Value: float64(i),
|
||||
Priority: int64(i),
|
||||
Value: float64(i),
|
||||
}
|
||||
|
||||
heap.Push(&q, &item)
|
||||
|
@@ -77,8 +77,8 @@ func (s *S) TestEvictAndReplaceWithMedian(c *C) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
var item utility.Item = utility.Item{
|
||||
Value: float64(i),
|
||||
Priority: int64(i),
|
||||
Value: float64(i),
|
||||
}
|
||||
|
||||
heap.Push(&q, &item)
|
||||
|
@@ -105,8 +105,8 @@ func (s *S) TestEvictAndReplaceWithFirstMode(c *C) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
heap.Push(&q, &utility.Item{
|
||||
Value: float64(i),
|
||||
Priority: int64(i),
|
||||
Value: float64(i),
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -131,8 +131,8 @@ func (s *S) TestEvictAndReplaceWithMinimum(c *C) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
var item utility.Item = utility.Item{
|
||||
Value: float64(i),
|
||||
Priority: int64(i),
|
||||
Value: float64(i),
|
||||
}
|
||||
|
||||
heap.Push(&q, &item)
|
||||
|
@@ -159,8 +159,8 @@ func (s *S) TestEvictAndReplaceWithMaximum(c *C) {
|
|||
|
||||
for i := 0; i < 10; i++ {
|
||||
var item utility.Item = utility.Item{
|
||||
Value: float64(i),
|
||||
Priority: int64(i),
|
||||
Value: float64(i),
|
||||
}
|
||||
|
||||
heap.Push(&q, &item)
|
||||
|
|
|
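The eviction tests above all build the same fixture: a utility.PriorityQueue populated via container/heap with utility.Item values whose Priority tracks insertion order. A minimal sketch of that setup, assuming the utility package's import path as used elsewhere in this commit:

```go
package main

import (
	"container/heap"
	"fmt"

	"github.com/matttproud/golang_instrumentation/utility" // path as used by the tests above
)

func main() {
	q := make(utility.PriorityQueue, 0, 10)

	// Mirror the setup in the eviction tests: ten items whose priority follows
	// their insertion order.
	for i := 0; i < 10; i++ {
		heap.Push(&q, &utility.Item{
			Priority: int64(i),
			Value:    float64(i),
		})
	}

	fmt.Println(len(q)) // 10 items awaiting an eviction policy such as EvictOldest
}
```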
@@ -10,6 +10,7 @@ package metrics
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/matttproud/golang_instrumentation/utility"
|
||||
"sync"
|
||||
)
|
||||
|
||||
|
@@ -19,44 +20,86 @@ value or an accumulation. For instance, if one wants to expose the current
|
|||
temperature or the hitherto bandwidth used, this would be the metric for such
|
||||
circumstances.
|
||||
*/
|
||||
type GaugeMetric struct {
|
||||
value float64
|
||||
mutex sync.RWMutex
|
||||
type Gauge interface {
|
||||
AsMarshallable() map[string]interface{}
|
||||
ResetAll()
|
||||
Set(labels map[string]string, value float64) float64
|
||||
String() string
|
||||
}
|
||||
|
||||
func (metric *GaugeMetric) String() string {
|
||||
formatString := "[GaugeMetric; value=%f]"
|
||||
type gaugeValue struct {
|
||||
labels map[string]string
|
||||
value float64
|
||||
}
|
||||
|
||||
func NewGauge() Gauge {
|
||||
return &gauge{
|
||||
values: map[string]*gaugeValue{},
|
||||
}
|
||||
}
|
||||
|
||||
type gauge struct {
|
||||
mutex sync.RWMutex
|
||||
values map[string]*gaugeValue
|
||||
}
|
||||
|
||||
func (metric *gauge) String() string {
|
||||
formatString := "[Gauge %s]"
|
||||
|
||||
metric.mutex.RLock()
|
||||
defer metric.mutex.RUnlock()
|
||||
|
||||
return fmt.Sprintf(formatString, metric.value)
|
||||
return fmt.Sprintf(formatString, metric.values)
|
||||
}
|
||||
|
||||
func (metric *GaugeMetric) Set(value float64) float64 {
|
||||
func (metric *gauge) Set(labels map[string]string, value float64) float64 {
|
||||
metric.mutex.Lock()
|
||||
defer metric.mutex.Unlock()
|
||||
|
||||
metric.value = value
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
|
||||
return metric.value
|
||||
signature := utility.LabelsToSignature(labels)
|
||||
|
||||
if original, ok := metric.values[signature]; ok {
|
||||
original.value = value
|
||||
} else {
|
||||
metric.values[signature] = &gaugeValue{
|
||||
labels: labels,
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func (metric *GaugeMetric) Get() float64 {
|
||||
func (metric *gauge) ResetAll() {
|
||||
metric.mutex.Lock()
|
||||
defer metric.mutex.Unlock()
|
||||
|
||||
for key, value := range metric.values {
|
||||
for label := range value.labels {
|
||||
delete(value.labels, label)
|
||||
}
|
||||
delete(metric.values, key)
|
||||
}
|
||||
}
|
||||
|
||||
func (metric *gauge) AsMarshallable() map[string]interface{} {
|
||||
metric.mutex.RLock()
|
||||
defer metric.mutex.RUnlock()
|
||||
|
||||
return metric.value
|
||||
}
|
||||
|
||||
func (metric *GaugeMetric) Marshallable() map[string]interface{} {
|
||||
metric.mutex.RLock()
|
||||
defer metric.mutex.RUnlock()
|
||||
|
||||
v := make(map[string]interface{}, 2)
|
||||
|
||||
v[valueKey] = metric.value
|
||||
v[typeKey] = gaugeTypeValue
|
||||
|
||||
return v
|
||||
values := make([]map[string]interface{}, 0, len(metric.values))
|
||||
for _, value := range metric.values {
|
||||
values = append(values, map[string]interface{}{
|
||||
labelsKey: value.labels,
|
||||
valueKey: value.value,
|
||||
})
|
||||
}
|
||||
|
||||
return map[string]interface{}{
|
||||
typeKey: gaugeTypeValue,
|
||||
valueKey: values,
|
||||
}
|
||||
}
|
||||
|
|
|
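Like the counter, the gauge now keeps one value per distinct label set, but its only mutator is Set, matching the comment above about exposing a current value rather than an accumulation. A small hedged usage sketch (import path assumed):

```go
package main

import (
	"fmt"

	"github.com/matttproud/golang_instrumentation/metrics" // assumed import path
)

func main() {
	temperature := metrics.NewGauge()

	// Set overwrites the value for the given label set; distinct label sets
	// keep distinct values.
	temperature.Set(map[string]string{"location": "outside"}, 21.5)
	temperature.Set(map[string]string{"location": "inside"}, 25.0)

	fmt.Println(temperature) // the String() form lists the labelled values
}
```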
@@ -9,43 +9,131 @@ license that can be found in the LICENSE file.
|
|||
package metrics
|
||||
|
||||
import (
|
||||
. "github.com/matttproud/gocheck"
|
||||
"encoding/json"
|
||||
"github.com/matttproud/golang_instrumentation/utility/test"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (s *S) TestGaugeCreate(c *C) {
|
||||
m := GaugeMetric{value: 1.0}
|
||||
func testGauge(t test.Tester) {
|
||||
type input struct {
|
||||
steps []func(g Gauge)
|
||||
}
|
||||
type output struct {
|
||||
value string
|
||||
}
|
||||
|
||||
c.Assert(m, Not(IsNil))
|
||||
c.Check(m.Get(), Equals, 1.0)
|
||||
var scenarios = []struct {
|
||||
in input
|
||||
out output
|
||||
}{
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Gauge){},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"gauge\",\"value\":[]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Gauge){
|
||||
func(g Gauge) {
|
||||
g.Set(nil, 1)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"gauge\",\"value\":[{\"labels\":{},\"value\":1}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Gauge){
|
||||
func(g Gauge) {
|
||||
g.Set(map[string]string{}, 2)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"gauge\",\"value\":[{\"labels\":{},\"value\":2}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Gauge){
|
||||
func(g Gauge) {
|
||||
g.Set(map[string]string{}, 3)
|
||||
},
|
||||
func(g Gauge) {
|
||||
g.Set(map[string]string{}, 5)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"gauge\",\"value\":[{\"labels\":{},\"value\":5}]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Gauge){
|
||||
func(g Gauge) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 13)
|
||||
},
|
||||
func(g Gauge) {
|
||||
g.Set(map[string]string{"handler": "/bar"}, 17)
|
||||
},
|
||||
func(g Gauge) {
|
||||
g.ResetAll()
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"gauge\",\"value\":[]}",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
steps: []func(g Gauge){
|
||||
func(g Gauge) {
|
||||
g.Set(map[string]string{"handler": "/foo"}, 19)
|
||||
},
|
||||
},
|
||||
},
|
||||
out: output{
|
||||
value: "{\"type\":\"gauge\",\"value\":[{\"labels\":{\"handler\":\"/foo\"},\"value\":19}]}",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
gauge := NewGauge()
|
||||
|
||||
for _, step := range scenario.in.steps {
|
||||
step(gauge)
|
||||
}
|
||||
|
||||
marshallable := gauge.AsMarshallable()
|
||||
|
||||
bytes, err := json.Marshal(marshallable)
|
||||
if err != nil {
|
||||
t.Errorf("%d. could not marshal into JSON %s", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
asString := string(bytes)
|
||||
|
||||
if scenario.out.value != asString {
|
||||
t.Errorf("%d. expected %q, got %q", i, scenario.out.value, asString)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestGaugeString(c *C) {
|
||||
m := GaugeMetric{value: 2.0}
|
||||
c.Check(m.String(), Equals, "[GaugeMetric; value=2.000000]")
|
||||
func TestGauge(t *testing.T) {
|
||||
testGauge(t)
|
||||
}
|
||||
|
||||
func (s *S) TestGaugeSet(c *C) {
|
||||
m := GaugeMetric{value: -1.0}
|
||||
|
||||
m.Set(-99.0)
|
||||
|
||||
c.Check(m.Get(), Equals, -99.0)
|
||||
}
|
||||
|
||||
func (s *S) TestGaugeMetricMarshallable(c *C) {
|
||||
m := GaugeMetric{value: 1.0}
|
||||
|
||||
returned := m.Marshallable()
|
||||
|
||||
c.Assert(returned, Not(IsNil))
|
||||
|
||||
c.Check(returned, HasLen, 2)
|
||||
c.Check(returned["value"], Equals, 1.0)
|
||||
c.Check(returned["type"], Equals, "gauge")
|
||||
}
|
||||
|
||||
func (s *S) TestGaugeAsMetric(c *C) {
|
||||
var metric Metric = &GaugeMetric{value: 1.0}
|
||||
|
||||
c.Assert(metric, Not(IsNil))
|
||||
func BenchmarkGauge(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testGauge(b)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -11,8 +11,10 @@ package metrics
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/matttproud/golang_instrumentation/utility"
|
||||
"math"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@@ -53,21 +55,25 @@ func LogarithmicSizedBucketsFor(lower, upper float64) []float64 {
|
|||
A HistogramSpecification defines how a Histogram is to be built.
|
||||
*/
|
||||
type HistogramSpecification struct {
|
||||
Starts []float64
|
||||
BucketMaker BucketBuilder
|
||||
BucketBuilder BucketBuilder
|
||||
ReportablePercentiles []float64
|
||||
Starts []float64
|
||||
}
|
||||
|
||||
type Histogram interface {
|
||||
Add(labels map[string]string, value float64)
|
||||
AsMarshallable() map[string]interface{}
|
||||
ResetAll()
|
||||
String() string
|
||||
}
|
||||
|
||||
/*
|
||||
The histogram is an accumulator for samples. It merely routes into which
|
||||
to bucket to capture an event and provides a percentile calculation
|
||||
mechanism.
|
||||
|
||||
Histogram makes do without locking by employing the law of large numbers
|
||||
to presume a convergence toward a given bucket distribution. Locking
|
||||
may be implemented in the buckets themselves, though.
|
||||
*/
|
||||
type Histogram struct {
|
||||
type histogram struct {
|
||||
bucketMaker BucketBuilder
|
||||
/*
|
||||
This represents the open interval's start at which values shall be added to
|
||||
the bucket. The interval continues until the beginning of the next bucket
|
||||
|
@@ -76,23 +82,52 @@ type Histogram struct {
|
|||
N.B.
|
||||
- bucketStarts should be sorted in ascending order;
|
||||
- len(bucketStarts) must be equivalent to len(buckets);
|
||||
- The index of a given bucketStarts' element is presumed to match
|
||||
- The index of a given bucketStarts' element is presumed to
|
||||
correspond to the appropriate element in buckets.
|
||||
*/
|
||||
bucketStarts []float64
|
||||
mutex sync.RWMutex
|
||||
/*
|
||||
These are the buckets that capture samples as they are emitted to the
|
||||
histogram. Please consult the reference interface and its implements for
|
||||
further details about behavior expectations.
|
||||
*/
|
||||
buckets []Bucket
|
||||
values map[string]*histogramValue
|
||||
/*
|
||||
These are the percentile values that will be reported on marshalling.
|
||||
*/
|
||||
reportablePercentiles []float64
|
||||
}
|
||||
|
||||
func (h *Histogram) Add(value float64) {
|
||||
type histogramValue struct {
|
||||
buckets []Bucket
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (h *histogram) Add(labels map[string]string, value float64) {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
if labels == nil {
|
||||
labels = map[string]string{}
|
||||
}
|
||||
|
||||
signature := utility.LabelsToSignature(labels)
|
||||
var histogram *histogramValue = nil
|
||||
if original, ok := h.values[signature]; ok {
|
||||
histogram = original
|
||||
} else {
|
||||
bucketCount := len(h.bucketStarts)
|
||||
histogram = &histogramValue{
|
||||
buckets: make([]Bucket, bucketCount),
|
||||
labels: labels,
|
||||
}
|
||||
for i := 0; i < bucketCount; i++ {
|
||||
histogram.buckets[i] = h.bucketMaker()
|
||||
}
|
||||
h.values[signature] = histogram
|
||||
}
|
||||
|
||||
lastIndex := 0
|
||||
|
||||
for i, bucketStart := range h.bucketStarts {
|
||||
|
@@ -103,21 +138,27 @@ func (h *Histogram) Add(value float64) {
|
|||
lastIndex = i
|
||||
}
|
||||
|
||||
h.buckets[lastIndex].Add(value)
|
||||
histogram.buckets[lastIndex].Add(value)
|
||||
}
|
||||
|
||||
func (h *Histogram) String() string {
|
||||
stringBuffer := bytes.NewBufferString("")
|
||||
func (h *histogram) String() string {
|
||||
h.mutex.RLock()
|
||||
defer h.mutex.RUnlock()
|
||||
|
||||
stringBuffer := &bytes.Buffer{}
|
||||
stringBuffer.WriteString("[Histogram { ")
|
||||
|
||||
for i, bucketStart := range h.bucketStarts {
|
||||
bucket := h.buckets[i]
|
||||
stringBuffer.WriteString(fmt.Sprintf("[%f, inf) = %s, ", bucketStart, bucket.String()))
|
||||
for _, histogram := range h.values {
|
||||
fmt.Fprintf(stringBuffer, "Labels: %s ", histogram.labels)
|
||||
for i, bucketStart := range h.bucketStarts {
|
||||
bucket := histogram.buckets[i]
|
||||
fmt.Fprintf(stringBuffer, "[%f, inf) = %s, ", bucketStart, bucket)
|
||||
}
|
||||
}
|
||||
|
||||
stringBuffer.WriteString("}]")
|
||||
|
||||
return string(stringBuffer.Bytes())
|
||||
return stringBuffer.String()
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -141,13 +182,15 @@ func prospectiveIndexForPercentile(percentile float64, totalObservations int) in
|
|||
/*
|
||||
Determine the next bucket element when interim bucket intervals may be empty.
|
||||
*/
|
||||
func (h *Histogram) nextNonEmptyBucketElement(currentIndex, bucketCount int, observationsByBucket []int) (*Bucket, int) {
|
||||
func (h *histogram) nextNonEmptyBucketElement(signature string, currentIndex, bucketCount int, observationsByBucket []int) (*Bucket, int) {
|
||||
for i := currentIndex; i < bucketCount; i++ {
|
||||
if observationsByBucket[i] == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return &h.buckets[i], 0
|
||||
histogram := h.values[signature]
|
||||
|
||||
return &histogram.buckets[i], 0
|
||||
}
|
||||
|
||||
panic("Illegal Condition: There were no remaining buckets to provide a value.")
|
||||
|
@@ -160,8 +203,8 @@ longer contained by the bucket, the index of the last item is returned. This
|
|||
may occur if the underlying bucket catalogs values and employs an eviction
|
||||
strategy.
|
||||
*/
|
||||
func (h *Histogram) bucketForPercentile(percentile float64) (*Bucket, int) {
|
||||
bucketCount := len(h.buckets)
|
||||
func (h *histogram) bucketForPercentile(signature string, percentile float64) (*Bucket, int) {
|
||||
bucketCount := len(h.bucketStarts)
|
||||
|
||||
/*
|
||||
This captures the quantity of samples in a given bucket's range.
|
||||
|
@@ -173,9 +216,11 @@ func (h *Histogram) bucketForPercentile(percentile float64) (*Bucket, int) {
|
|||
*/
|
||||
cumulativeObservationsByBucket := make([]int, bucketCount)
|
||||
|
||||
var totalObservations int = 0
|
||||
totalObservations := 0
|
||||
|
||||
for i, bucket := range h.buckets {
|
||||
histogram := h.values[signature]
|
||||
|
||||
for i, bucket := range histogram.buckets {
|
||||
observations := bucket.Observations()
|
||||
observationsByBucket[i] = observations
|
||||
totalObservations += bucket.Observations()
|
||||
|
@@ -210,14 +255,14 @@ func (h *Histogram) bucketForPercentile(percentile float64) (*Bucket, int) {
|
|||
take this into account.
|
||||
*/
|
||||
if observationsByBucket[i] == subIndex {
|
||||
return h.nextNonEmptyBucketElement(i+1, bucketCount, observationsByBucket)
|
||||
return h.nextNonEmptyBucketElement(signature, i+1, bucketCount, observationsByBucket)
|
||||
}
|
||||
|
||||
return &h.buckets[i], subIndex
|
||||
return &histogram.buckets[i], subIndex
|
||||
}
|
||||
}
|
||||
|
||||
return &h.buckets[0], 0
|
||||
return &histogram.buckets[0], 0
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -225,8 +270,8 @@ Return the histogram's estimate of the value for a given percentile of
|
|||
collected samples. The requested percentile is expected to be a real
|
||||
value within (0, 1.0].
|
||||
*/
|
||||
func (h *Histogram) Percentile(percentile float64) float64 {
|
||||
bucket, index := h.bucketForPercentile(percentile)
|
||||
func (h *histogram) percentile(signature string, percentile float64) float64 {
|
||||
bucket, index := h.bucketForPercentile(signature, percentile)
|
||||
|
||||
return (*bucket).ValueForIndex(index)
|
||||
}
|
||||
|
@@ -235,38 +280,53 @@ func formatFloat(value float64) string {
|
|||
return strconv.FormatFloat(value, floatFormat, floatPrecision, floatBitCount)
|
||||
}
|
||||
|
||||
func (h *Histogram) Marshallable() map[string]interface{} {
|
||||
numberOfPercentiles := len(h.reportablePercentiles)
|
||||
func (h *histogram) AsMarshallable() map[string]interface{} {
|
||||
h.mutex.RLock()
|
||||
defer h.mutex.RUnlock()
|
||||
|
||||
result := make(map[string]interface{}, 2)
|
||||
|
||||
result[typeKey] = histogramTypeValue
|
||||
values := make([]map[string]interface{}, 0, len(h.values))
|
||||
|
||||
value := make(map[string]interface{}, numberOfPercentiles)
|
||||
|
||||
for _, percentile := range h.reportablePercentiles {
|
||||
percentileString := formatFloat(percentile)
|
||||
value[percentileString] = formatFloat(h.Percentile(percentile))
|
||||
for signature, value := range h.values {
|
||||
metricContainer := map[string]interface{}{}
|
||||
metricContainer[labelsKey] = value.labels
|
||||
intermediate := map[string]interface{}{}
|
||||
for _, percentile := range h.reportablePercentiles {
|
||||
formatted := formatFloat(percentile)
|
||||
intermediate[formatted] = h.percentile(signature, percentile)
|
||||
}
|
||||
metricContainer[valueKey] = intermediate
|
||||
values = append(values, metricContainer)
|
||||
}
|
||||
|
||||
result[valueKey] = value
|
||||
result[valueKey] = values
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (h *histogram) ResetAll() {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
for signature, value := range h.values {
|
||||
for _, bucket := range value.buckets {
|
||||
bucket.Reset()
|
||||
}
|
||||
|
||||
delete(h.values, signature)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Produce a histogram from a given specification.
|
||||
*/
|
||||
func CreateHistogram(specification *HistogramSpecification) *Histogram {
|
||||
bucketCount := len(specification.Starts)
|
||||
|
||||
metric := &Histogram{
|
||||
func NewHistogram(specification *HistogramSpecification) Histogram {
|
||||
metric := &histogram{
|
||||
bucketMaker: specification.BucketBuilder,
|
||||
bucketStarts: specification.Starts,
|
||||
buckets: make([]Bucket, bucketCount),
|
||||
reportablePercentiles: specification.ReportablePercentiles,
|
||||
}
|
||||
|
||||
for i := 0; i < bucketCount; i++ {
|
||||
metric.buckets[i] = specification.BucketMaker()
|
||||
values: map[string]*histogramValue{},
|
||||
}
|
||||
|
||||
return metric
|
||||
|
|
|
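NewHistogram replaces CreateHistogram above, and the histogram now allocates a fresh bucket array per label set on first use (see the Add hunk). A sketch of the new construction and use, assembled from the HistogramSpecification fields and the example program earlier in this diff; import paths are assumptions:

```go
package main

import (
	"fmt"

	"github.com/matttproud/golang_instrumentation/maths"   // assumed import paths, as used by
	"github.com/matttproud/golang_instrumentation/metrics" // the example earlier in this commit
)

func main() {
	latency := metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 200, 4),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	})

	// Each label set gets its own bucket array; a value is routed to the
	// bucket whose start interval contains it.
	latency.Add(map[string]string{"service": "foo"}, 123.0)
	latency.Add(map[string]string{"service": "bar"}, 45.6)

	fmt.Println(latency) // String() walks the per-label bucket contents
}
```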
@@ -1,974 +1,9 @@
|
|||
/*
|
||||
Copyright (c) 2012, Matt T. Proud
|
||||
All rights reserved.
|
||||
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the LICENSE file.
|
||||
*/
|
||||
// Copyright (c) 2013, Matt T. Proud
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
. "github.com/matttproud/gocheck"
|
||||
"github.com/matttproud/golang_instrumentation/maths"
|
||||
)
|
||||
|
||||
func (s *S) TestEquallySizedBucketsFor(c *C) {
|
||||
h := EquallySizedBucketsFor(0, 10, 5)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
c.Check(h, HasLen, 5)
|
||||
c.Check(h[0], Equals, 0.0)
|
||||
c.Check(h[1], Equals, 2.0)
|
||||
c.Check(h[2], Equals, 4.0)
|
||||
c.Check(h[3], Equals, 6.0)
|
||||
c.Check(h[4], Equals, 8.0)
|
||||
}
|
||||
|
||||
func (s *S) TestLogarithmicSizedBucketsFor(c *C) {
|
||||
h := LogarithmicSizedBucketsFor(0, 2048)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
c.Check(h, HasLen, 11)
|
||||
c.Check(h[0], Equals, 0.0)
|
||||
c.Check(h[1], Equals, 2.0)
|
||||
c.Check(h[2], Equals, 4.0)
|
||||
c.Check(h[3], Equals, 8.0)
|
||||
c.Check(h[4], Equals, 16.0)
|
||||
c.Check(h[5], Equals, 32.0)
|
||||
c.Check(h[6], Equals, 64.0)
|
||||
c.Check(h[7], Equals, 128.0)
|
||||
c.Check(h[8], Equals, 256.0)
|
||||
c.Check(h[9], Equals, 512.0)
|
||||
c.Check(h[10], Equals, 1024.0)
|
||||
}
|
||||
|
||||
func (s *S) TestCreateHistogram(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 10, 5),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
h := CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
c.Check(h.String(), Equals, "[Histogram { [0.000000, inf) = [TallyingBucket (Empty)], [2.000000, inf) = [TallyingBucket (Empty)], [4.000000, inf) = [TallyingBucket (Empty)], [6.000000, inf) = [TallyingBucket (Empty)], [8.000000, inf) = [TallyingBucket (Empty)], }]")
|
||||
|
||||
h.Add(1)
|
||||
|
||||
c.Check(h.String(), Equals, "[Histogram { [0.000000, inf) = [TallyingBucket (1.000000, 1.000000); 1 items], [2.000000, inf) = [TallyingBucket (Empty)], [4.000000, inf) = [TallyingBucket (Empty)], [6.000000, inf) = [TallyingBucket (Empty)], [8.000000, inf) = [TallyingBucket (Empty)], }]")
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentile(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 100, 100),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
h := CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
for i := 2.0; i <= 100.0; i++ {
|
||||
h.Add(i)
|
||||
}
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.05)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
h.Add(50)
|
||||
h.Add(51)
|
||||
}
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.50)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 50)
|
||||
c.Check((*bucket).Observations(), Equals, 51)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.51)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 51)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileSingleton(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 3, 3),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
var h *Histogram = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(0.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(2.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileDoubleInSingleBucket(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 3, 3),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
var h *Histogram = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(0.0)
|
||||
h.Add(0.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(1.0)
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(2.0)
|
||||
h.Add(2.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileTripleInSingleBucket(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 3, 3),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
var h *Histogram = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(0.0)
|
||||
h.Add(0.0)
|
||||
h.Add(0.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 2)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
h.Add(1.0)
|
||||
h.Add(1.0)
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 2)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
h.Add(2.0)
|
||||
h.Add(2.0)
|
||||
h.Add(2.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 2)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 3)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileTwoEqualAdjacencies(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 3, 3),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
var h *Histogram = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(0.0)
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
h.Add(1.0)
|
||||
h.Add(2.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileTwoAdjacenciesUnequal(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 3, 3),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
var h *Histogram = CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
for i := 0.0; i < 1.0; i += 0.01 {
|
||||
bucket, subindex := h.bucketForPercentile(i)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
}
|
||||
|
||||
h.Add(0.0)
|
||||
h.Add(0.0)
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
h.Add(0.0)
|
||||
h.Add(1.0)
|
||||
h.Add(1.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
h.Add(1.0)
|
||||
h.Add(1.0)
|
||||
h.Add(2.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
h = CreateHistogram(hs)
|
||||
|
||||
h.Add(1.0)
|
||||
h.Add(2.0)
|
||||
h.Add(2.0)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 1)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.67)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(2.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.5)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 2)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0 / 3.0)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileWithBinomialApproximation(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 5, 6),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
c.Assert(hs, Not(IsNil))
|
||||
|
||||
h := CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
n := 5
|
||||
p := 0.5
|
||||
|
||||
for k := 0; k < 6; k++ {
|
||||
limit := 1000000.0 * maths.BinomialPDF(k, n, p)
|
||||
for j := 0.0; j < limit; j++ {
|
||||
h.Add(float64(k))
|
||||
}
|
||||
}
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.0)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 31250)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.03125)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 31249)
|
||||
c.Check((*bucket).Observations(), Equals, 31250)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.1875)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 156249)
|
||||
c.Check((*bucket).Observations(), Equals, 156250)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.50)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 312499)
|
||||
c.Check((*bucket).Observations(), Equals, 312500)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.8125)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 312499)
|
||||
c.Check((*bucket).Observations(), Equals, 312500)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.96875)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 156249)
|
||||
c.Check((*bucket).Observations(), Equals, 156250)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 31249)
|
||||
c.Check((*bucket).Observations(), Equals, 31250)
|
||||
}
|
||||
|
||||
func (s *S) TestBucketForPercentileWithUniform(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 100, 100),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
c.Assert(hs, Not(IsNil))
|
||||
|
||||
h := CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
for i := 0.0; i <= 99.0; i++ {
|
||||
h.Add(i)
|
||||
}
|
||||
|
||||
for i := 0; i <= 99; i++ {
|
||||
c.Check(h.bucketStarts[i], Equals, float64(i))
|
||||
}
|
||||
|
||||
for i := 1; i <= 100; i++ {
|
||||
c.Check(h.buckets[i-1].Observations(), Equals, 1)
|
||||
}
|
||||
|
||||
var bucket *Bucket = nil
|
||||
var subindex int = 0
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(0.01)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
|
||||
bucket, subindex = h.bucketForPercentile(1.0)
|
||||
c.Assert(*bucket, Not(IsNil))
|
||||
c.Check(subindex, Equals, 0)
|
||||
c.Check((*bucket).Observations(), Equals, 1)
|
||||
}
|
||||
|
||||
func (s *S) TestHistogramPercentileUniform(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 100, 100),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
h := CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
for i := 0.0; i <= 99.0; i++ {
|
||||
h.Add(i)
|
||||
}
|
||||
|
||||
c.Check(h.Percentile(0.01), Equals, 0.0)
|
||||
c.Check(h.Percentile(0.49), Equals, 48.0)
|
||||
c.Check(h.Percentile(0.50), Equals, 49.0)
|
||||
c.Check(h.Percentile(0.51), Equals, 50.0)
|
||||
c.Check(h.Percentile(1.0), Equals, 99.0)
|
||||
}
|
||||
|
||||
func (s *S) TestHistogramPercentileBinomialApproximation(c *C) {
|
||||
hs := &HistogramSpecification{
|
||||
Starts: EquallySizedBucketsFor(0, 5, 6),
|
||||
BucketMaker: TallyingBucketBuilder,
|
||||
}
|
||||
|
||||
h := CreateHistogram(hs)
|
||||
|
||||
c.Assert(h, Not(IsNil))
|
||||
|
||||
n := 5
|
||||
p := 0.5
|
||||
|
||||
for k := 0; k < 6; k++ {
|
||||
limit := 1000000.0 * maths.BinomialPDF(k, n, p)
|
||||
for j := 0.0; j < limit; j++ {
|
||||
h.Add(float64(k))
|
||||
}
|
||||
}
|
||||
|
||||
c.Check(h.Percentile(0.0), Equals, 0.0)
|
||||
c.Check(h.Percentile(0.03125), Equals, 0.0)
|
||||
c.Check(h.Percentile(0.1875), Equals, 1.0)
|
||||
c.Check(h.Percentile(0.5), Equals, 2.0)
|
||||
c.Check(h.Percentile(0.8125), Equals, 3.0)
|
||||
c.Check(h.Percentile(0.96875), Equals, 4.0)
|
||||
c.Check(h.Percentile(1.0), Equals, 5.0)
|
||||
}
|
||||
|
||||
func (s *S) TestHistogramMarshallable(c *C) {
	hs := &HistogramSpecification{
		Starts:                EquallySizedBucketsFor(0, 5, 6),
		BucketMaker:           TallyingBucketBuilder,
		ReportablePercentiles: []float64{0.03125, 0.1875, 0.5, 0.8125, 0.96875, 1.0},
	}

	h := CreateHistogram(hs)

	c.Assert(h, Not(IsNil))

	n := 5
	p := 0.5

	for k := 0; k < 6; k++ {
		limit := 1000000.0 * maths.BinomialPDF(k, n, p)
		for j := 0.0; j < limit; j++ {
			h.Add(float64(k))
		}
	}

	m := h.Marshallable()

	c.Assert(m, Not(IsNil))
	c.Check(m, HasLen, 2)
	c.Check(m["type"], Equals, "histogram")

	var v map[string]interface{} = m["value"].(map[string]interface{})

	c.Assert(v, Not(IsNil))

	c.Check(v, HasLen, 6)
	c.Check(v["0.031250"], Equals, "0.000000")
	c.Check(v["0.187500"], Equals, "1.000000")
	c.Check(v["0.500000"], Equals, "2.000000")
	c.Check(v["0.812500"], Equals, "3.000000")
	c.Check(v["0.968750"], Equals, "4.000000")
	c.Check(v["1.000000"], Equals, "5.000000")
}

func (s *S) TestHistogramAsMetric(c *C) {
	hs := &HistogramSpecification{
		Starts:                EquallySizedBucketsFor(0, 5, 6),
		BucketMaker:           TallyingBucketBuilder,
		ReportablePercentiles: []float64{0.0, 0.03125, 0.1875, 0.5, 0.8125, 0.96875, 1.0},
	}

	h := CreateHistogram(hs)

	var metric Metric = h

	c.Assert(metric, Not(IsNil))
}

// TODO(matt): Re-Add tests for this type.
@@ -1,23 +1,17 @@
/*
Copyright (c) 2012, Matt T. Proud
All rights reserved.

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package metrics

/*
A Metric is something that can be exposed via the registry framework.
*/
// A Metric is something that can be exposed via the registry framework.
type Metric interface {
	/*
	Produce a human-consumable representation of the metric.
	*/
	// Produce a JSON-consumable representation of the metric.
	AsMarshallable() map[string]interface{}
	// Reset the parent metrics and delete all child metrics.
	ResetAll()
	// Produce a human-consumable representation of the metric.
	String() string
	/*
	Produce a JSON-consumable representation of the metric.
	*/
	Marshallable() map[string]interface{}
}
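To make the new contract concrete, here is a minimal sketch of a type satisfying the revised Metric interface, assuming the old Marshallable method is the one being dropped; the constantMetric type is hypothetical and not part of the patch.

// constantMetric is a hypothetical, minimal Metric implementation used only
// to illustrate the revised interface.
type constantMetric struct {
	value float64
}

// AsMarshallable produces a JSON-consumable representation of the metric.
func (c *constantMetric) AsMarshallable() map[string]interface{} {
	return map[string]interface{}{"type": "constant", "value": c.value}
}

// ResetAll would reset the parent metric and delete all children; a constant
// has nothing to reset.
func (c *constantMetric) ResetAll() {}

// String produces a human-consumable representation of the metric.
func (c *constantMetric) String() string {
	return fmt.Sprintf("constantMetric{value: %f}", c.value)
}

// Compile-time assertion that the sketch satisfies the interface.
var _ Metric = &constantMetric{}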
@@ -88,11 +88,11 @@ Upon insertion, an object is compared against collected extrema and noted
as a new minimum or maximum if appropriate.
*/
type TallyingBucket struct {
	observations     int
	smallestObserved float64
	estimator        TallyingIndexEstimator
	largestObserved  float64
	mutex            sync.RWMutex
	estimator        TallyingIndexEstimator
	observations     int
	smallestObserved float64
}

func (b *TallyingBucket) Add(value float64) {
@@ -131,22 +131,31 @@ func (b *TallyingBucket) ValueForIndex(index int) float64 {
	return b.estimator(b.smallestObserved, b.largestObserved, index, b.observations)
}

func (b *TallyingBucket) Reset() {
	b.mutex.Lock()
	defer b.mutex.Unlock()

	b.largestObserved = math.SmallestNonzeroFloat64
	b.observations = 0
	b.smallestObserved = math.MaxFloat64
}

/*
Produce a TallyingBucket with sane defaults.
*/
func DefaultTallyingBucket() TallyingBucket {
	return TallyingBucket{
		smallestObserved: math.MaxFloat64,
		largestObserved:  math.SmallestNonzeroFloat64,
		estimator:        Minimum,
		largestObserved:  math.SmallestNonzeroFloat64,
		smallestObserved: math.MaxFloat64,
	}
}

func CustomTallyingBucket(estimator TallyingIndexEstimator) TallyingBucket {
	return TallyingBucket{
		smallestObserved: math.MaxFloat64,
		largestObserved:  math.SmallestNonzeroFloat64,
		estimator:        estimator,
		largestObserved:  math.SmallestNonzeroFloat64,
		smallestObserved: math.MaxFloat64,
	}
}
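For orientation, a brief hedged sketch of how a TallyingBucket might be exercised directly; it assumes, as the tests elsewhere in this change suggest, that the bucket also exposes an Observations accessor.

bucket := DefaultTallyingBucket()

bucket.Add(0.5)
bucket.Add(2.5)

count := bucket.Observations() // assumed accessor; should report 2 here

bucket.Reset() // extrema return to their sentinels and the count drops to 0
_ = count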
@@ -30,19 +30,23 @@ N.B.(mtp): A major limitation hereof is that the StopWatch protocol cannot
retain instrumentation if a panic percolates within the context that is
being measured.
*/
type StopWatch struct {
	startTime time.Time
type StopWatch interface {
	Stop() time.Duration
}

type stopWatch struct {
	endTime      time.Time
	onCompletion CompletionCallback
	startTime    time.Time
}

/*
Return a new StopWatch that is ready for instrumentation.
*/
func Start(onCompletion CompletionCallback) *StopWatch {
	return &StopWatch{
		startTime: time.Now(),
func Start(onCompletion CompletionCallback) StopWatch {
	return &stopWatch{
		onCompletion: onCompletion,
		startTime:    time.Now(),
	}
}

@@ -50,7 +54,7 @@ func Start(onCompletion CompletionCallback) *StopWatch {
Stop the StopWatch returning the elapsed duration of its lifetime while
firing an optional CompletionCallback in the background.
*/
func (s *StopWatch) Stop() time.Duration {
func (s *stopWatch) Stop() time.Duration {
	s.endTime = time.Now()
	duration := s.endTime.Sub(s.startTime)
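A brief sketch of how the interface-based StopWatch is used; the record and expensiveOperation names are hypothetical, and the snippet assumes the metrics, log, and time imports.

var record metrics.CompletionCallback = func(duration time.Duration) {
	log.Printf("operation took %s", duration) // fired in the background by Stop
}

stopWatch := metrics.Start(record)

expensiveOperation() // hypothetical work being measured

elapsed := stopWatch.Stop() // returns the elapsed duration and fires record
_ = elapsed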
@ -14,7 +14,10 @@ import (
)

func (s *S) TestTimerStart(c *C) {
	stopWatch := Start(nil)
	stopWatch, ok := Start(nil).(*stopWatch)
	if !ok {
		c.Check(ok, Equals, true)
	}

	c.Assert(stopWatch, Not(IsNil))
	c.Assert(stopWatch.startTime, Not(IsNil))
229
registry.go
@@ -9,21 +9,39 @@ the LICENSE file.
package registry

import (
	"compress/gzip"
	"encoding/base64"
	"encoding/json"
	"flag"
	"fmt"
	"github.com/matttproud/golang_instrumentation/metrics"
	"github.com/matttproud/golang_instrumentation/utility"
	"io"
	"log"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"
)

const (
	authorization   = "Authorization"
	contentType     = "Content-Type"
	jsonContentType = "application/json"
	jsonSuffix      = ".json"
	acceptEncodingHeader     = "Accept-Encoding"
	authorization            = "Authorization"
	authorizationHeader      = "WWW-Authenticate"
	authorizationHeaderValue = "Basic"
	contentEncodingHeader    = "Content-Encoding"
	contentTypeHeader        = "Content-Type"
	gzipAcceptEncodingValue  = "gzip"
	gzipContentEncodingValue = "gzip"
	jsonContentType          = "application/json"
	jsonSuffix               = ".json"
)

var (
	abortOnMisuse             bool
	debugRegistration        bool
	useAggressiveSanityChecks bool
)

/*
@@ -31,12 +49,18 @@ This callback accumulates the microsecond duration of the reporting framework's
overhead such that it can be reported.
*/
var requestLatencyAccumulator metrics.CompletionCallback = func(duration time.Duration) {
	microseconds := float64(duration / time.Millisecond)
	microseconds := float64(duration / time.Microsecond)

	requestLatencyLogarithmicAccumulating.Add(microseconds)
	requestLatencyEqualAccumulating.Add(microseconds)
	requestLatencyLogarithmicTallying.Add(microseconds)
	requestLatencyEqualTallying.Add(microseconds)
	requestLatency.Add(nil, microseconds)
}

// container represents a top-level registered metric that encompasses its
// static metadata.
type container struct {
	baseLabels map[string]string
	docstring  string
	metric     metrics.Metric
	name       string
}

/*
@@ -46,8 +70,8 @@ In most situations, using DefaultRegistry is sufficient versus creating one's
own.
*/
type Registry struct {
	mutex        sync.RWMutex
	NameToMetric map[string]metrics.Metric
	mutex               sync.RWMutex
	signatureContainers map[string]container
}

/*
@@ -56,7 +80,7 @@ cases.
*/
func NewRegistry() *Registry {
	return &Registry{
		NameToMetric:        make(map[string]metrics.Metric),
		signatureContainers: make(map[string]container),
	}
}

@@ -69,25 +93,96 @@ var DefaultRegistry = NewRegistry()
/*
Associate a Metric with the DefaultRegistry.
*/
func Register(name, unusedDocstring string, unusedBaseLabels map[string]string, metric metrics.Metric) {
	DefaultRegistry.Register(name, unusedDocstring, unusedBaseLabels, metric)
func Register(name, docstring string, baseLabels map[string]string, metric metrics.Metric) error {
	return DefaultRegistry.Register(name, docstring, baseLabels, metric)
}

// isValidCandidate returns the candidate's signature if it is acceptable for
// use. In the event of any apparent incorrect use it will report the problem,
// invalidate the candidate, or outright abort.
func (r *Registry) isValidCandidate(name string, baseLabels map[string]string) (signature string, err error) {
	if len(name) == 0 {
		err = fmt.Errorf("unnamed metric named with baseLabels %s is invalid", baseLabels)

		if abortOnMisuse {
			panic(err)
		} else if debugRegistration {
			log.Println(err)
		}
	}

	if _, contains := baseLabels[nameLabel]; contains {
		err = fmt.Errorf("metric named %s with baseLabels %s contains reserved label name %s in baseLabels", name, baseLabels, nameLabel)

		if abortOnMisuse {
			panic(err)
		} else if debugRegistration {
			log.Println(err)
		}

		return
	}

	baseLabels[nameLabel] = name
	signature = utility.LabelsToSignature(baseLabels)

	if _, contains := r.signatureContainers[signature]; contains {
		err = fmt.Errorf("metric named %s with baseLabels %s is already registered", name, baseLabels)
		if abortOnMisuse {
			panic(err)
		} else if debugRegistration {
			log.Println(err)
		}

		return
	}

	if useAggressiveSanityChecks {
		for _, container := range r.signatureContainers {
			if container.name == name {
				err = fmt.Errorf("metric named %s with baseLabels %s is already registered as %s and risks causing confusion", name, baseLabels, container.baseLabels)
				if abortOnMisuse {
					panic(err)
				} else if debugRegistration {
					log.Println(err)
				}

				return
			}
		}
	}

	return
}

/*
Register a metric with a given name. Name should be globally unique.
*/
func (r *Registry) Register(name, unusedDocstring string, unusedBaseLabels map[string]string, metric metrics.Metric) {
func (r *Registry) Register(name, docstring string, baseLabels map[string]string, metric metrics.Metric) (err error) {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	if _, present := r.NameToMetric[name]; !present {
		r.NameToMetric[name] = metric
		log.Printf("Registered %s.\n", name)
	} else {
		log.Printf("Attempted to register duplicate %s metric.\n", name)
	if baseLabels == nil {
		baseLabels = map[string]string{}
	}

	signature, err := r.isValidCandidate(name, baseLabels)
	if err != nil {
		return
	}

	r.signatureContainers[signature] = container{
		baseLabels: baseLabels,
		docstring:  docstring,
		metric:     metric,
		name:       name,
	}

	return
}

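Taken together with the package-level Register above, registration now looks roughly like the following sketch; the metric name, docstring, base labels, and the root import path are illustrative assumptions rather than part of the patch.

package main

import (
	"log"

	registry "github.com/matttproud/golang_instrumentation" // assumed root import path
	"github.com/matttproud/golang_instrumentation/metrics"
)

func main() {
	fooCalls := metrics.NewCounter()

	// The docstring and base labels are now retained, and misuse surfaces as an error.
	err := registry.Register(
		"foo_total", // hypothetical metric name
		"A counter of hypothetical foo invocations.",
		map[string]string{"service": "foo"}, // hypothetical base labels
		fooCalls,
	)
	if err != nil {
		log.Println("registration rejected:", err)
	}
}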
// YieldBasicAuthExporter creates a http.HandlerFunc that is protected by HTTP's
// basic authentication.
func (register *Registry) YieldBasicAuthExporter(username, password string) http.HandlerFunc {
	exporter := register.YieldExporter()

@@ -108,12 +203,80 @@ func (register *Registry) YieldBasicAuthExporter(username, password string) http
		if authenticated {
			exporter.ServeHTTP(w, r)
		} else {
			w.Header().Add("WWW-Authenticate", "Basic")
			w.Header().Add(authorizationHeader, authorizationHeaderValue)
			http.Error(w, "access forbidden", 401)
		}
	})
}

func (registry *Registry) dumpToWriter(writer io.Writer) (err error) {
	defer func() {
		if err != nil {
			dumpErrorCount.Increment(nil)
		}
	}()

	numberOfMetrics := len(registry.signatureContainers)
	keys := make([]string, 0, numberOfMetrics)
	for key := range registry.signatureContainers {
		keys = append(keys, key)
	}

	sort.Strings(keys)

	_, err = writer.Write([]byte("["))
	if err != nil {
		return
	}

	index := 0

	for _, key := range keys {
		container := registry.signatureContainers[key]
		intermediate := map[string]interface{}{
			baseLabelsKey: container.baseLabels,
			docstringKey:  container.docstring,
			metricKey:     container.metric.AsMarshallable(),
		}
		marshaled, err := json.Marshal(intermediate)
		if err != nil {
			marshalErrorCount.Increment(nil)
			index++
			continue
		}

		if index > 0 && index < numberOfMetrics {
			_, err = writer.Write([]byte(","))
			if err != nil {
				return err
			}
		}

		_, err = writer.Write(marshaled)
		if err != nil {
			return err
		}
		index++
	}

	_, err = writer.Write([]byte("]"))

	return
}

// decorateWriter annotates the response writer to handle any other behaviors
// that might be beneficial to the client---e.g., GZIP encoding.
func decorateWriter(request *http.Request, writer http.ResponseWriter) io.Writer {
	if !strings.Contains(request.Header.Get(acceptEncodingHeader), gzipAcceptEncodingValue) {
		return writer
	}

	writer.Header().Set(contentEncodingHeader, gzipContentEncodingValue)
	gziper := gzip.NewWriter(writer)

	return gziper
}

|
||||
Create a http.HandlerFunc that is tied to r Registry such that requests
|
||||
against it generate a representation of the housed metrics.
|
||||
|
@ -121,19 +284,23 @@ against it generate a representation of the housed metrics.
|
|||
func (registry *Registry) YieldExporter() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var instrumentable metrics.InstrumentableCall = func() {
|
||||
requestCount.Increment()
|
||||
requestCount.Increment(nil)
|
||||
url := r.URL
|
||||
|
||||
if strings.HasSuffix(url.Path, jsonSuffix) {
|
||||
w.Header().Set(contentType, jsonContentType)
|
||||
composite := make(map[string]interface{}, len(registry.NameToMetric))
|
||||
for name, metric := range registry.NameToMetric {
|
||||
composite[name] = metric.Marshallable()
|
||||
header := w.Header()
|
||||
header.Set(ProtocolVersionHeader, APIVersion)
|
||||
header.Set(contentTypeHeader, jsonContentType)
|
||||
|
||||
writer := decorateWriter(r, w)
|
||||
|
||||
// TODO(matt): Migrate to ioutil.NopCloser.
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
defer closer.Close()
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(composite)
|
||||
registry.dumpToWriter(writer)
|
||||
|
||||
w.Write(data)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
|
@ -142,3 +309,9 @@ func (registry *Registry) YieldExporter() http.HandlerFunc {
|
|||
metrics.InstrumentCall(instrumentable, requestLatencyAccumulator)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&abortOnMisuse, FlagNamespace+"abortonmisuse", false, "abort if a semantic misuse is encountered (bool).")
|
||||
flag.BoolVar(&debugRegistration, FlagNamespace+"debugregistration", false, "display information about the metric registration process (bool).")
|
||||
flag.BoolVar(&useAggressiveSanityChecks, FlagNamespace+"useaggressivesanitychecks", false, "perform expensive validation of metrics (bool).")
|
||||
}
|
||||
|
|
|
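To tie the pieces together, wiring the exporter into an HTTP server might look like the following sketch; the path and listen address are hypothetical, and the root import path is assumed.

package main

import (
	"net/http"

	registry "github.com/matttproud/golang_instrumentation" // assumed root import path
)

func main() {
	// The exporter only answers paths ending in ".json", so a path such as
	// "/metrics.json" (hypothetical) is appropriate.
	http.Handle("/metrics.json", registry.DefaultRegistry.YieldExporter())

	http.ListenAndServe(":8080", nil) // hypothetical listen address
}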
@ -0,0 +1,331 @@
|
|||
// Copyright (c) 2013, Matt T. Proud
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be found in
|
||||
// the LICENSE file.
|
||||
|
||||
package registry
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/matttproud/golang_instrumentation/metrics"
|
||||
"github.com/matttproud/golang_instrumentation/utility/test"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func testRegister(t test.Tester) {
|
||||
var oldState = struct {
|
||||
abortOnMisuse bool
|
||||
debugRegistration bool
|
||||
useAggressiveSanityChecks bool
|
||||
}{
|
||||
abortOnMisuse: abortOnMisuse,
|
||||
debugRegistration: debugRegistration,
|
||||
useAggressiveSanityChecks: useAggressiveSanityChecks,
|
||||
}
|
||||
defer func() {
|
||||
abortOnMisuse = oldState.abortOnMisuse
|
||||
debugRegistration = oldState.debugRegistration
|
||||
useAggressiveSanityChecks = oldState.useAggressiveSanityChecks
|
||||
}()
|
||||
|
||||
type input struct {
|
||||
name string
|
||||
baseLabels map[string]string
|
||||
}
|
||||
|
||||
var scenarios = []struct {
|
||||
inputs []input
|
||||
outputs []bool
|
||||
}{
|
||||
{},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "my_name_without_labels",
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
true,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "my_name_without_labels",
|
||||
},
|
||||
{
|
||||
name: "another_name_without_labels",
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
true,
|
||||
true,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "",
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
false,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "valid_name",
|
||||
baseLabels: map[string]string{"name": "illegal_duplicate_name"},
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
false,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "duplicate_names",
|
||||
},
|
||||
{
|
||||
name: "duplicate_names",
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
true,
|
||||
false,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "duplicate_names_with_identical_labels",
|
||||
baseLabels: map[string]string{"label": "value"},
|
||||
},
|
||||
{
|
||||
name: "duplicate_names_with_identical_labels",
|
||||
baseLabels: map[string]string{"label": "value"},
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
true,
|
||||
false,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputs: []input{
|
||||
{
|
||||
name: "duplicate_names_with_dissimilar_labels",
|
||||
baseLabels: map[string]string{"label": "foo"},
|
||||
},
|
||||
{
|
||||
name: "duplicate_names_with_dissimilar_labels",
|
||||
baseLabels: map[string]string{"label": "bar"},
|
||||
},
|
||||
},
|
||||
outputs: []bool{
|
||||
true,
|
||||
false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
if len(scenario.inputs) != len(scenario.outputs) {
|
||||
t.Fatalf("%d. len(scenario.inputs) != len(scenario.outputs)")
|
||||
}
|
||||
|
||||
abortOnMisuse = false
|
||||
debugRegistration = false
|
||||
useAggressiveSanityChecks = true
|
||||
|
||||
registry := NewRegistry()
|
||||
|
||||
for j, input := range scenario.inputs {
|
||||
actual := registry.Register(input.name, "", input.baseLabels, nil)
|
||||
if scenario.outputs[j] != (actual == nil) {
|
||||
t.Errorf("%d.%d. expected %s, got %s", i, j, scenario.outputs[j], actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
testRegister(t)
|
||||
}
|
||||
|
||||
func BenchmarkRegister(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testRegister(b)
|
||||
}
|
||||
}
|
||||
|
||||
type fakeResponseWriter struct {
|
||||
header http.Header
|
||||
body bytes.Buffer
|
||||
}
|
||||
|
||||
func (r *fakeResponseWriter) Header() http.Header {
|
||||
return r.header
|
||||
}
|
||||
|
||||
func (r *fakeResponseWriter) Write(d []byte) (l int, err error) {
|
||||
return r.body.Write(d)
|
||||
}
|
||||
|
||||
func (r *fakeResponseWriter) WriteHeader(c int) {
|
||||
}
|
||||
|
||||
func testDecorateWriter(t test.Tester) {
|
||||
type input struct {
|
||||
headers map[string]string
|
||||
body []byte
|
||||
}
|
||||
|
||||
type output struct {
|
||||
headers map[string]string
|
||||
body []byte
|
||||
}
|
||||
|
||||
var scenarios = []struct {
|
||||
in input
|
||||
out output
|
||||
}{
|
||||
{},
|
||||
{
|
||||
in: input{
|
||||
headers: map[string]string{
|
||||
"Accept-Encoding": "gzip,deflate,sdch",
|
||||
},
|
||||
body: []byte("Hi, mom!"),
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{
|
||||
"Content-Encoding": "gzip",
|
||||
},
|
||||
body: []byte("\x1f\x8b\b\x00\x00\tn\x88\x00\xff\xf2\xc8\xd4Q\xc8\xcd\xcfU\x04\x04\x00\x00\xff\xff9C&&\b\x00\x00\x00"),
|
||||
},
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
headers: map[string]string{
|
||||
"Accept-Encoding": "foo",
|
||||
},
|
||||
body: []byte("Hi, mom!"),
|
||||
},
|
||||
out: output{
|
||||
headers: map[string]string{},
|
||||
body: []byte("Hi, mom!"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
request, _ := http.NewRequest("GET", "/", nil)
|
||||
|
||||
for key, value := range scenario.in.headers {
|
||||
request.Header.Add(key, value)
|
||||
}
|
||||
|
||||
baseWriter := &fakeResponseWriter{
|
||||
header: make(http.Header),
|
||||
}
|
||||
|
||||
writer := decorateWriter(request, baseWriter)
|
||||
|
||||
for key, value := range scenario.out.headers {
|
||||
if baseWriter.Header().Get(key) != value {
|
||||
t.Errorf("%d. expected %s for header %s, got %s", i, value, key, baseWriter.Header().Get(key))
|
||||
}
|
||||
}
|
||||
|
||||
writer.Write(scenario.in.body)
|
||||
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
|
||||
if !bytes.Equal(scenario.out.body, baseWriter.body.Bytes()) {
|
||||
t.Errorf("%d. expected %s for body, got %s", i, scenario.out.body, baseWriter.body.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecorateWriter(t *testing.T) {
|
||||
testDecorateWriter(t)
|
||||
}
|
||||
|
||||
func BenchmarkDecorateWriter(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testDecorateWriter(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testDumpToWriter(t test.Tester) {
|
||||
type input struct {
|
||||
metrics map[string]metrics.Metric
|
||||
}
|
||||
|
||||
var scenarios = []struct {
|
||||
in input
|
||||
out []byte
|
||||
}{
|
||||
{
|
||||
out: []byte("[]"),
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
metrics: map[string]metrics.Metric{
|
||||
"foo": metrics.NewCounter(),
|
||||
},
|
||||
},
|
||||
out: []byte("[{\"baseLabels\":{\"label_foo\":\"foo\",\"name\":\"foo\"},\"docstring\":\"metric foo\",\"metric\":{\"type\":\"counter\",\"value\":[]}}]"),
|
||||
},
|
||||
{
|
||||
in: input{
|
||||
metrics: map[string]metrics.Metric{
|
||||
"foo": metrics.NewCounter(),
|
||||
"bar": metrics.NewCounter(),
|
||||
},
|
||||
},
|
||||
out: []byte("[{\"baseLabels\":{\"label_bar\":\"bar\",\"name\":\"bar\"},\"docstring\":\"metric bar\",\"metric\":{\"type\":\"counter\",\"value\":[]}},{\"baseLabels\":{\"label_foo\":\"foo\",\"name\":\"foo\"},\"docstring\":\"metric foo\",\"metric\":{\"type\":\"counter\",\"value\":[]}}]"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
registry := NewRegistry()
|
||||
|
||||
for name, metric := range scenario.in.metrics {
|
||||
err := registry.Register(name, fmt.Sprintf("metric %s", name), map[string]string{fmt.Sprintf("label_%s", name): name}, metric)
|
||||
if err != nil {
|
||||
t.Errorf("%d. encountered error while registering metric %s", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
actual := &bytes.Buffer{}
|
||||
|
||||
err := registry.dumpToWriter(actual)
|
||||
if err != nil {
|
||||
t.Errorf("%d. encountered error while dumping %s", i, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(scenario.out, actual.Bytes()) {
|
||||
t.Errorf("%d. expected %q for dumping, got %q", i, scenario.out, actual.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDumpToWriter(t *testing.T) {
|
||||
testDumpToWriter(t)
|
||||
}
|
||||
|
||||
func BenchmarkDumpToWriter(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testDumpToWriter(b)
|
||||
}
|
||||
}
|
40
telemetry.go
@@ -20,43 +20,25 @@ exposed if the DefaultRegistry's exporter is hooked into the HTTP request
handler.
*/
var (
	// TODO(matt): Refresh these names to support namespacing.
	marshalErrorCount = metrics.NewCounter()
	dumpErrorCount    = metrics.NewCounter()

	requestCount                          *metrics.CounterMetric = &metrics.CounterMetric{}
	requestLatencyLogarithmicBuckets      []float64              = metrics.LogarithmicSizedBucketsFor(0, 1000)
	requestLatencyEqualBuckets            []float64              = metrics.EquallySizedBucketsFor(0, 1000, 10)
	requestLatencyLogarithmicAccumulating *metrics.Histogram     = metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                requestLatencyLogarithmicBuckets,
		BucketMaker:           metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
	})
	requestLatencyEqualAccumulating *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                requestLatencyEqualBuckets,
		BucketMaker:           metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
	})
	requestLatencyLogarithmicTallying *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                requestLatencyLogarithmicBuckets,
		BucketMaker:           metrics.TallyingBucketBuilder,
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
	})
	requestLatencyEqualTallying *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:      requestLatencyEqualBuckets,
		BucketMaker: metrics.TallyingBucketBuilder,
	requestCount          = metrics.NewCounter()
	requestLatencyBuckets = metrics.LogarithmicSizedBucketsFor(0, 1000)
	requestLatency        = metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                requestLatencyBuckets,
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
	})

	startTime *metrics.GaugeMetric = &metrics.GaugeMetric{}
	startTime = metrics.NewGauge()
)

func init() {
	startTime.Set(float64(time.Now().Unix()))
	startTime.Set(nil, float64(time.Now().Unix()))

	DefaultRegistry.Register("requests_metrics_total", "A counter of the total requests made against the telemetry system.", NilLabels, requestCount)
	DefaultRegistry.Register("requests_metrics_latency_logarithmic_accumulating_microseconds", "A histogram of the response latency for requests made against the telemetry system.", NilLabels, requestLatencyLogarithmicAccumulating)
	DefaultRegistry.Register("requests_metrics_latency_equal_accumulating_microseconds", "A histogram of the response latency for requests made against the telemetry system.", NilLabels, requestLatencyEqualAccumulating)
	DefaultRegistry.Register("requests_metrics_latency_logarithmic_tallying_microseconds", "A histogram of the response latency for requests made against the telemetry system.", NilLabels, requestLatencyLogarithmicTallying)
	DefaultRegistry.Register("request_metrics_latency_equal_tallying_microseconds", "A histogram of the response latency for requests made against the telemetry system.", NilLabels, requestLatencyEqualTallying)
	DefaultRegistry.Register("telemetry_requests_metrics_total", "A counter of the total requests made against the telemetry system.", NilLabels, requestCount)
	DefaultRegistry.Register("telemetry_requests_metrics_latency_microseconds", "A histogram of the response latency for requests made against the telemetry system.", NilLabels, requestLatency)

	DefaultRegistry.Register("instance_start_time_seconds", "The time at which the current instance started (UTC).", NilLabels, startTime)
}
@@ -10,11 +10,6 @@ license that can be found in the LICENSE file.
The utility package provides general purpose helpers to assist with this
library.

optional.go provides a mechanism for safely getting a set value or falling
back to defaults a la a Haskell and Scala Maybe or Guava Optional.

optional_test.go provides a test complement for the optional.go module.

priority_queue.go provides a simple priority queue.

priority_queue_test.go provides a test complement for the priority_queue.go
@@ -1,44 +0,0 @@
/*
Copyright (c) 2012, Matt T. Proud
All rights reserved.

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/

package utility

type Optional struct {
	value interface{}
}

func EmptyOptional() *Optional {
	emission := &Optional{value: nil}

	return emission
}

func Of(value interface{}) *Optional {
	emission := &Optional{value: value}

	return emission
}

func (o *Optional) IsSet() bool {
	return o.value != nil
}

func (o *Optional) Get() interface{} {
	if o.value == nil {
		panic("Expected a value to be set.")
	}

	return o.value
}

func (o *Optional) Or(a interface{}) interface{} {
	if o.IsSet() {
		return o.Get()
	}
	return a
}
@@ -1,30 +0,0 @@
/*
Copyright (c) 2012, Matt T. Proud
All rights reserved.

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/

package utility

import (
	. "github.com/matttproud/gocheck"
)

func (s *S) TestEmptyOptional(c *C) {
	var o *Optional = EmptyOptional()

	c.Assert(o, Not(IsNil))
	c.Check(o.IsSet(), Equals, false)
	c.Assert("default", Equals, o.Or("default"))
}

func (s *S) TestOf(c *C) {
	var o *Optional = Of(1)

	c.Assert(o, Not(IsNil))
	c.Check(o.IsSet(), Equals, true)
	c.Check(o.Get(), Equals, 1)
	c.Check(o.Or(2), Equals, 1)
}
@@ -9,8 +9,8 @@ license that can be found in the LICENSE file.
package utility

type Item struct {
	Value    interface{}
	Priority int64
	Value    interface{}
	index    int
}

@@ -0,0 +1,41 @@
// Copyright (c) 2013, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package utility

import (
	"bytes"
	"sort"
)

const (
	delimiter = "|"
)

// LabelsToSignature provides a way of building a unique signature
// (i.e., fingerprint) for a given label set sequence.
func LabelsToSignature(labels map[string]string) string {
	// TODO(matt): This is a wart, and we'll want to validate that collisions
	// do not occur in less-than-diligent environments.
	cardinality := len(labels)
	keys := make([]string, 0, cardinality)

	for label := range labels {
		keys = append(keys, label)
	}

	sort.Strings(keys)

	buffer := bytes.Buffer{}

	for _, label := range keys {
		buffer.WriteString(label)
		buffer.WriteString(delimiter)
		buffer.WriteString(labels[label])
	}

	return buffer.String()
}
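To illustrate the signature format concretely (the label names and values here are hypothetical, and the utility package is assumed to be imported as in registry.go): keys are sorted, and each key is joined to its value with the "|" delimiter, with no separator between pairs.

labels := map[string]string{
	"service": "api",            // hypothetical label
	"name":    "requests_total", // hypothetical label
}

signature := utility.LabelsToSignature(labels)
// signature == "name|requests_totalservice|api"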
@@ -0,0 +1,43 @@
// Copyright (c) 2013, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package utility

import (
	"github.com/matttproud/golang_instrumentation/utility/test"
	"testing"
)

func testLabelsToSignature(t test.Tester) {
	var scenarios = []struct {
		in  map[string]string
		out string
	}{
		{
			in:  map[string]string{},
			out: "",
		},
		{},
	}

	for i, scenario := range scenarios {
		actual := LabelsToSignature(scenario.in)

		if actual != scenario.out {
			t.Errorf("%d. expected %s, got %s", i, scenario.out, actual)
		}
	}
}

func TestLabelToSignature(t *testing.T) {
	testLabelsToSignature(t)
}

func BenchmarkLabelToSignature(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testLabelsToSignature(b)
	}
}
@@ -0,0 +1,14 @@
// Copyright (c) 2013, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package test

type Tester interface {
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
}
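Both *testing.T and *testing.B satisfy this interface, which is what lets a single table-driven helper double as a test and a benchmark, as in this brief sketch (the Something names are hypothetical):

func testSomething(t test.Tester) {
	if 1+1 != 2 {
		t.Errorf("arithmetic is broken")
	}
}

func TestSomething(t *testing.T) {
	testSomething(t)
}

func BenchmarkSomething(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testSomething(b)
	}
}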