Initial commit into version control.

Matt T. Proud 2012-05-19 23:59:25 +02:00
commit 959403ad3e
32 changed files with 3131 additions and 0 deletions

22
.gitignore vendored Normal file

@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

22
LICENSE Normal file

@@ -0,0 +1,22 @@
Copyright (c) 2012, Matt T. Proud
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

66
README.md Normal file

@@ -0,0 +1,66 @@
# Overview
This [Go](http://golang.org) package is an extraction of a piece of
instrumentation code I whipped up for a personal project that a friend of mine
and I are working on. We were in need of some rudimentary statistics to
observe the behaviors of the server's various components, so this was written.
The code here is not a verbatim copy thereof but rather a thoughtful
re-implementation should other folks need to consume and analyze such telemetry.
N.B. --- I have spent a bit of time working through the model in my head and
probably haven't elucidated my ideas as clearly as I need to. If you examine
main.go and export/registry.go, you'll find examples of the types of
instrumentation use cases this package addresses. There are probably
numerous Go language idiomatic changes that need to be made, but this task has
been deferred for now.
# Continuous Integration
[![Build Status](https://secure.travis-ci.org/matttproud/golang_instrumentation.png?branch=master)](http://travis-ci.org/matttproud/golang_instrumentation)
# Metrics
A metric is a measurement mechanism.
## Gauge
A Gauge is a metric that exposes merely an instantaneous value or some snapshot
thereof.
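For illustration, here is a minimal sketch of gauge usage, assuming the
`GaugeMetric` type defined in `metrics/gauge.go` later in this commit:

```go
package main

import (
	"fmt"

	"github.com/matttproud/golang_instrumentation/metrics"
)

func main() {
	// A gauge simply tracks the latest scalar value it has been handed.
	temperature := &metrics.GaugeMetric{}
	temperature.Set(21.5)
	temperature.IncrementBy(0.5)
	fmt.Println(temperature.Get())      // 22
	fmt.Println(temperature.Humanize()) // [GaugeMetric; value=22.000000]
}
```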
## Histogram
A Histogram is a metric that captures events or samples into buckets. It
exposes its values via percentile estimations.
### Buckets
A Bucket is a generic container that collects samples and their values. It
prescribes no behavior on its own aside from merely accepting a value,
leaving it up to the concrete implementation to decide what to do with the
injected values.
#### Accumulating Bucket
An Accumulating Bucket is a bucket that appends the new sample to a timestamped
priority queue such that the eldest values are evicted according to a given
policy.
#### Eviction Policies
Once an Accumulating Bucket reaches capacity, its eviction policy is invoked.
This reaps the oldest N items according to the behavior described below.
##### Remove Oldest
This merely removes the oldest N items without performing some aggregation
replacement operation on them.
##### Aggregate Oldest
This removes the oldest N items while performing some summary aggregation
operation thereupon, which is then appended to the list in the former values'
place.
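To make the two policies above concrete, here is a small sketch against a bare
priority queue, mirroring `metrics/eviction_test.go` from this commit; the
synthetic integer priorities stand in for the timestamps an Accumulating Bucket
would normally use:

```go
package main

import (
	"container/heap"
	"fmt"

	"github.com/matttproud/golang_instrumentation/maths"
	"github.com/matttproud/golang_instrumentation/metrics"
	"github.com/matttproud/golang_instrumentation/utility"
)

func main() {
	q := make(utility.PriorityQueue, 0, 10)
	heap.Init(&q)
	for i := 0; i < 10; i++ {
		heap.Push(&q, &utility.Item{Value: float64(i), Priority: int64(i)})
	}

	// Remove Oldest: the five eldest samples are simply dropped.
	metrics.EvictOldest(5)(&q)
	fmt.Println(len(q)) // 5

	// Aggregate Oldest: the next five eldest samples are folded into their
	// average, which is pushed back onto the queue in their place.
	metrics.EvictAndReplaceWith(5, maths.Average)(&q)
	fmt.Println(len(q)) // 1
}
```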
#### Tallying Bucket
A Tallying Bucket differs from an Accumulating Bucket in that it never stores
any of the values emitted into it but rather exposes a simplified summary
representation thereof. For instance, if a value therein is requested,
it may situationally emit a minimum, a maximum, an average, or any other
requested reduction.
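Putting these pieces together, the following sketch shows how a histogram is
assembled; it is adapted from the usage in `export/registry.go` below:

```go
package main

import (
	"fmt"

	"github.com/matttproud/golang_instrumentation/maths"
	"github.com/matttproud/golang_instrumentation/metrics"
)

func main() {
	// An accumulating histogram: each bucket retains up to 1000 timestamped
	// samples; once full, the 50 eldest are evicted and replaced with their
	// average.
	latency := metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 1000),
		BucketMaker:           metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
		ReportablePercentiles: []float64{0.01, 0.5, 0.99},
	})
	latency.Add(123)
	fmt.Println(latency.Percentile(0.5)) // 123

	// A tallying histogram never retains the individual samples it observes.
	tallied := metrics.CreateHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 1000, 10),
		BucketMaker:           metrics.TallyingBucketBuilder,
		ReportablePercentiles: []float64{0.5},
	})
	tallied.Add(42)
	fmt.Println(tallied.Humanize())
}
```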
# Testing
This package employs [gocheck](http://labix.org/gocheck) for testing. Please
ensure that all tests pass by running the following from the project root:
$ go test ./...

5
TODO Normal file

@@ -0,0 +1,5 @@
- Validate repository for Go code fluency and idiomatic adherence.
- Decouple HTTP report handler from our project and incorporate into this
repository.
- Implement labeled metric support.
- Evaluate using atomic types versus locks.

130
export/registry.go Normal file

@@ -0,0 +1,130 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// registry.go provides a container for the centralized exposition of metrics to
// their prospective consumers.
package export
import (
"encoding/json"
"github.com/matttproud/golang_instrumentation/maths"
"github.com/matttproud/golang_instrumentation/metrics"
"log"
"net/http"
"strings"
"sync"
"time"
)
var requestCount *metrics.GaugeMetric = &metrics.GaugeMetric{}
var requestLatencyLogarithmicBuckets []float64 = metrics.LogarithmicSizedBucketsFor(0, 1000)
var requestLatencyEqualBuckets []float64 = metrics.EquallySizedBucketsFor(0, 1000, 10)
var requestLatencyLogarithmicAccumulating *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
Starts: requestLatencyLogarithmicBuckets,
BucketMaker: metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
})
var requestLatencyEqualAccumulating *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
Starts: requestLatencyEqualBuckets,
BucketMaker: metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
})
var requestLatencyLogarithmicTallying *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
Starts: requestLatencyLogarithmicBuckets,
BucketMaker: metrics.TallyingBucketBuilder,
ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
})
var requestLatencyEqualTallying *metrics.Histogram = metrics.CreateHistogram(&metrics.HistogramSpecification{
Starts: requestLatencyEqualBuckets,
BucketMaker: metrics.TallyingBucketBuilder,
ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
})
var requestLatencyAccumulator metrics.CompletionCallback = func(duration time.Duration) {
micros := float64(int64(duration) / 1E3)
requestLatencyLogarithmicAccumulating.Add(micros)
requestLatencyEqualAccumulating.Add(micros)
requestLatencyLogarithmicTallying.Add(micros)
requestLatencyEqualTallying.Add(micros)
}
// Registry is, as the name implies, a registrar where metrics are listed.
//
// In most situations, using DefaultRegistry is sufficient versus creating
// one's own.
type Registry struct {
mutex sync.RWMutex
NameToMetric map[string]metrics.Metric
}
// This builds a new metric registry. It is not needed in the majority of
// cases.
func NewRegistry() *Registry {
return &Registry{
NameToMetric: make(map[string]metrics.Metric),
}
}
// This is the default registry with which Metric objects are associated. It
// is primarily a read-only object after server instantiation.
var DefaultRegistry = NewRegistry()
// Associate a Metric with the DefaultRegistry.
func Register(name string, metric metrics.Metric) {
DefaultRegistry.Register(name, metric)
}
// Register a metric with a given name. Name should be globally unique.
func (r *Registry) Register(name string, metric metrics.Metric) {
r.mutex.Lock()
defer r.mutex.Unlock()
if _, present := r.NameToMetric[name]; !present {
r.NameToMetric[name] = metric
log.Printf("Registered %s.\n", name)
} else {
log.Printf("Attempted to register duplicate %s metric.\n", name)
}
}
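// handleJson emits every registered metric's Marshallable representation as a
// single JSON document, incrementing requestCount and feeding the request's
// own latency into the latency histograms via requestLatencyAccumulator.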
func handleJson(w http.ResponseWriter, r *http.Request) {
var instrumentable metrics.InstrumentableCall = func() {
requestCount.Increment()
w.Header().Set("Content-Type", "application/json")
composite := make(map[string]interface{}, len(DefaultRegistry.NameToMetric))
for name, metric := range DefaultRegistry.NameToMetric {
composite[name] = metric.Marshallable()
}
data, _ := json.Marshal(composite)
w.Write(data)
}
metrics.InstrumentCall(instrumentable, requestLatencyAccumulator)
}
// TODO(mtp): Make instance-specific.
var Exporter http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
url := r.URL
if strings.HasSuffix(url.Path, ".json") {
handleJson(w, r)
}
}
func init() {
DefaultRegistry.Register("requests_total", requestCount)
DefaultRegistry.Register("request_latency_logarithmic_accumulating_microseconds", requestLatencyLogarithmicAccumulating)
DefaultRegistry.Register("request_latency_equal_accumulating_microseconds", requestLatencyEqualAccumulating)
DefaultRegistry.Register("request_latency_logarithmic_tallying_microseconds", requestLatencyLogarithmicTallying)
DefaultRegistry.Register("request_latency_equal_tallying_microseconds", requestLatencyEqualTallying)
}

11
main.go Normal file

@@ -0,0 +1,11 @@
package main
import (
"github.com/matttproud/golang_instrumentation/export"
"net/http"
)
func main() {
http.Handle("/metrics.json", export.Exporter)
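// export.Exporter answers any path ending in ".json"; mounted here, the
// telemetry is served from http://localhost:8080/metrics.json.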
http.ListenAndServe(":8080", nil)
}

39
maths/distributions.go Normal file

@@ -0,0 +1,39 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// distributions.go provides basic distribution-generating functions that are
// used primarily in testing contexts.
package maths
import (
"math"
)
// Go's standard library does not offer a factorial function.
func Factorial(of int) int64 {
if of <= 0 {
return 1
}
var result int64 = 1
for i := int64(of); i >= 1; i-- {
result *= i
}
return result
}
// Calculate the value of the probability density for a given binomial
// statistic, where k is the target count of true cases, n is the number of
// subjects, and p is the probability.
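// For example, BinomialPDF(2, 5, 0.5) = C(5, 2) * 0.5^2 * 0.5^3 = 10 * 0.03125 = 0.3125.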
func BinomialPDF(k, n int, p float64) float64 {
binomialCoefficient := float64(Factorial(n)) / float64(Factorial(k)*Factorial(n-k))
intermediate := math.Pow(p, float64(k)) * math.Pow(1-p, float64(n-k))
return binomialCoefficient * intermediate
}

43
maths/helpers_for_testing.go Normal file

@@ -0,0 +1,43 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// helpers_for_testing.go provides testing assistants for this package and its
// dependents.
package maths
import (
. "launchpad.net/gocheck"
"math"
"reflect"
)
type isNaNChecker struct {
*CheckerInfo
}
// This piece provides a simple tester for the gocheck testing library to
// ascertain if a value is not-a-number.
var IsNaN Checker = &isNaNChecker{
&CheckerInfo{Name: "IsNaN", Params: []string{"value"}},
}
func (checker *isNaNChecker) Check(params []interface{}, names []string) (result bool, error string) {
return isNaN(params[0]), ""
}
func isNaN(obtained interface{}) (result bool) {
if obtained == nil {
result = false
} else {
switch v := reflect.ValueOf(obtained); v.Kind() {
case reflect.Float64:
return math.IsNaN(obtained.(float64))
}
}
return false
}

23
maths/maths_test.go Normal file

@@ -0,0 +1,23 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// maths_test.go provides a test suite for all tests in the maths package
// hierarchy. It employs the gocheck framework for test scaffolding.
package maths
import (
. "launchpad.net/gocheck"
"testing"
)
type S struct{}
var _ = Suite(&S{})
func TestMaths(t *testing.T) {
TestingT(t)
}

110
maths/statistics.go Normal file

@@ -0,0 +1,110 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// statistics.go provides basic summary statistics functions for the purpose of
// metrics aggregation.
// TODO(mtp): Split this out into a summary statistics file once moving/rolling
// averages are calculated.
package maths
import (
"math"
"sort"
)
// ReductionMethod provides a method for reducing metrics into a given scalar
// value.
type ReductionMethod func([]float64) float64
var Average ReductionMethod = func(input []float64) float64 {
count := 0.0
sum := 0.0
for _, v := range input {
sum += v
count++
}
if count == 0 {
return math.NaN()
}
return sum / count
}
// Extract the first modal value.
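// For example, FirstMode of {1, 2, 3, 4, 3} yields 3, the first value to attain
// the largest tally.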
var FirstMode ReductionMethod = func(input []float64) float64 {
valuesToFrequency := map[float64]int64{}
var largestTally int64 = math.MinInt64
var largestTallyValue float64 = math.NaN()
for _, v := range input {
presentCount, _ := valuesToFrequency[v]
presentCount++
valuesToFrequency[v] = presentCount
if presentCount > largestTally {
largestTally = presentCount
largestTallyValue = v
}
}
return largestTallyValue
}
// Calculate the percentile by choosing the nearest neighboring value.
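// For example, for the inputs 1 through 100, NearestRank(input, 50) computes an
// ordinal rank of ceil((50/100)*100 + 0.5) = 51 and returns the 51st smallest
// value, namely 51.0.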
func NearestRank(input []float64, percentile float64) float64 {
inputSize := len(input)
if inputSize == 0 {
return math.NaN()
}
ordinalRank := math.Ceil(((percentile / 100.0) * float64(inputSize)) + 0.5)
copiedInput := make([]float64, inputSize)
copy(copiedInput, input)
sort.Float64s(copiedInput)
preliminaryIndex := int(ordinalRank) - 1
if preliminaryIndex == inputSize {
return copiedInput[preliminaryIndex-1]
}
return copiedInput[preliminaryIndex]
}
func NearestRankReducer(percentile float64) func(input []float64) float64 {
return func(input []float64) float64 {
return NearestRank(input, percentile)
}
}
var Median ReductionMethod = NearestRankReducer(50)
var Minimum ReductionMethod = func(input []float64) float64 {
var minimum float64 = math.MaxFloat64
for _, v := range input {
minimum = math.Min(minimum, v)
}
return minimum
}
var Maximum ReductionMethod = func(input []float64) float64 {
// N.B.: math.SmallestNonzeroFloat64 is the smallest positive value, not the
// most negative one; start from -math.MaxFloat64 so negative inputs are handled.
var maximum float64 = -math.MaxFloat64
for _, v := range input {
maximum = math.Max(maximum, v)
}
return maximum
}

121
maths/statistics_test.go Normal file

@@ -0,0 +1,121 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// statistics_test.go provides a test complement for the statistics.go module.
package maths
import (
. "launchpad.net/gocheck"
)
func (s *S) TestAverageOnEmpty(c *C) {
empty := []float64{}
var v float64 = Average(empty)
c.Assert(v, IsNaN)
}
func (s *S) TestAverageForSingleton(c *C) {
input := []float64{5}
var v float64 = Average(input)
c.Check(v, Equals, 5.0)
}
func (s *S) TestAverage(c *C) {
input := []float64{5, 15}
var v float64 = Average(input)
c.Check(v, Equals, 10.0)
}
func (s *S) TestFirstModeOnEmpty(c *C) {
input := []float64{}
var v float64 = FirstMode(input)
c.Assert(v, IsNaN)
}
func (s *S) TestFirstModeForSingleton(c *C) {
input := []float64{5}
var v float64 = FirstMode(input)
c.Check(v, Equals, 5.0)
}
func (s *S) TestFirstModeForUnimodal(c *C) {
input := []float64{1, 2, 3, 4, 3}
var v float64 = FirstMode(input)
c.Check(v, Equals, 3.0)
}
func (s *S) TestNearestRankForEmpty(c *C) {
input := []float64{}
c.Assert(NearestRank(input, 0), IsNaN)
c.Assert(NearestRank(input, 50), IsNaN)
c.Assert(NearestRank(input, 100), IsNaN)
}
func (s *S) TestNearestRankForSingleton(c *C) {
input := []float64{5}
c.Check(NearestRank(input, 0), Equals, 5.0)
c.Check(NearestRank(input, 50), Equals, 5.0)
c.Check(NearestRank(input, 100), Equals, 5.0)
}
func (s *S) TestNearestRankForDouble(c *C) {
input := []float64{5, 5}
c.Check(NearestRank(input, 0), Equals, 5.0)
c.Check(NearestRank(input, 50), Equals, 5.0)
c.Check(NearestRank(input, 100), Equals, 5.0)
}
func (s *S) TestNearestRankFor100(c *C) {
input := make([]float64, 100)
for i := 0; i < 100; i++ {
input[i] = float64(i + 1)
}
c.Check(NearestRank(input, 0), Equals, 1.0)
c.Check(NearestRank(input, 50), Equals, 51.0)
c.Check(NearestRank(input, 100), Equals, 100.0)
}
func (s *S) TestNearestRankFor101(c *C) {
input := make([]float64, 101)
for i := 0; i < 101; i++ {
input[i] = float64(i + 1)
}
c.Check(NearestRank(input, 0), Equals, 1.0)
c.Check(NearestRank(input, 50), Equals, 51.0)
c.Check(NearestRank(input, 100), Equals, 101.0)
}
func (s *S) TestMedianReducer(c *C) {
input := []float64{1, 2, 3}
c.Check(Median(input), Equals, 2.0)
}
func (s *S) TestMinimum(c *C) {
input := []float64{5, 1, 10, 1.1, 4}
c.Check(Minimum(input), Equals, 1.0)
}
func (s *S) TestMaximum(c *C) {
input := []float64{5, 1, 10, 1.1, 4}
c.Check(Maximum(input), Equals, 10.0)
}

112
metrics/accumulating_bucket.go Normal file

@@ -0,0 +1,112 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// accumulating_bucket.go provides a histogram bucket type that accumulates
// elements until a given capacity and enacts a given eviction policy upon
// such a condition.
package metrics
import (
"bytes"
"container/heap"
"fmt"
"github.com/matttproud/golang_instrumentation/utility"
"math"
"sort"
"sync"
"time"
)
type AccumulatingBucket struct {
observations int
elements utility.PriorityQueue
maximumSize int
mutex sync.RWMutex
evictionPolicy EvictionPolicy
}
func AccumulatingBucketBuilder(evictionPolicy EvictionPolicy, maximumSize int) BucketBuilder {
return func() Bucket {
return &AccumulatingBucket{
maximumSize: maximumSize,
evictionPolicy: evictionPolicy,
elements: make(utility.PriorityQueue, 0, maximumSize),
}
}
}
// Add a value to the bucket. Depending on whether the bucket is full, it may
// trigger an eviction of older items.
func (b *AccumulatingBucket) Add(value float64) {
b.mutex.Lock()
defer b.mutex.Unlock()
b.observations++
size := len(b.elements)
v := utility.Item{
Value: value,
Priority: -1 * time.Now().UnixNano(),
}
if size == b.maximumSize {
b.evictionPolicy(&b.elements)
}
heap.Push(&b.elements, &v)
}
func (b *AccumulatingBucket) Humanize() string {
b.mutex.RLock()
defer b.mutex.RUnlock()
buffer := new(bytes.Buffer)
fmt.Fprintf(buffer, "[AccumulatingBucket with %d elements and %d capacity] { ", len(b.elements), b.maximumSize)
for i := 0; i < len(b.elements); i++ {
fmt.Fprintf(buffer, "%f, ", b.elements[i].Value)
}
fmt.Fprintf(buffer, "}")
return string(buffer.Bytes())
}
func (b *AccumulatingBucket) ValueForIndex(index int) float64 {
b.mutex.RLock()
defer b.mutex.RUnlock()
elementCount := len(b.elements)
if elementCount == 0 {
return math.NaN()
}
rawData := make([]float64, elementCount)
for i, element := range b.elements {
rawData[i] = element.Value.(float64)
}
sort.Float64s(rawData)
// N.B.(mtp): Interfacing components should not need to comprehend what
// eviction strategy is used; therefore, we adjust this silently.
if index >= elementCount {
return rawData[elementCount-1]
}
return rawData[index]
}
func (b *AccumulatingBucket) Observations() int {
b.mutex.RLock()
defer b.mutex.RUnlock()
return b.observations
}

156
metrics/accumulating_bucket_test.go Normal file

@@ -0,0 +1,156 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// accumulating_bucket_test.go provides a test complement for the
// accumulating_bucket.go module.
package metrics
import (
"github.com/matttproud/golang_instrumentation/maths"
"github.com/matttproud/golang_instrumentation/utility"
. "launchpad.net/gocheck"
"time"
)
func (s *S) TestAccumulatingBucketBuilderWithEvictOldest(c *C) {
var evictOldestThree EvictionPolicy = EvictOldest(3)
c.Assert(evictOldestThree, Not(IsNil))
bb := AccumulatingBucketBuilder(evictOldestThree, 5)
c.Assert(bb, Not(IsNil))
var b Bucket = bb()
c.Assert(b, Not(IsNil))
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 0 elements and 5 capacity] { }")
b.Add(1)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 1 elements and 5 capacity] { 1.000000, }")
b.Add(2)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 2 elements and 5 capacity] { 1.000000, 2.000000, }")
b.Add(3)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 3 elements and 5 capacity] { 1.000000, 2.000000, 3.000000, }")
b.Add(4)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 4 elements and 5 capacity] { 1.000000, 2.000000, 3.000000, 4.000000, }")
b.Add(5)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 5 elements and 5 capacity] { 1.000000, 2.000000, 3.000000, 4.000000, 5.000000, }")
b.Add(6)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 3 elements and 5 capacity] { 4.000000, 5.000000, 6.000000, }")
var bucket Bucket = b
c.Assert(bucket, Not(IsNil))
}
func (s *S) TestAccumulatingBucketBuilderWithEvictAndReplaceWithAverage(c *C) {
var evictAndReplaceWithAverage EvictionPolicy = EvictAndReplaceWith(3, maths.Average)
c.Assert(evictAndReplaceWithAverage, Not(IsNil))
bb := AccumulatingBucketBuilder(evictAndReplaceWithAverage, 5)
c.Assert(bb, Not(IsNil))
var b Bucket = bb()
c.Assert(b, Not(IsNil))
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 0 elements and 5 capacity] { }")
b.Add(1)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 1 elements and 5 capacity] { 1.000000, }")
b.Add(2)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 2 elements and 5 capacity] { 1.000000, 2.000000, }")
b.Add(3)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 3 elements and 5 capacity] { 1.000000, 2.000000, 3.000000, }")
b.Add(4)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 4 elements and 5 capacity] { 1.000000, 2.000000, 3.000000, 4.000000, }")
b.Add(5)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 5 elements and 5 capacity] { 1.000000, 2.000000, 3.000000, 4.000000, 5.000000, }")
b.Add(6)
c.Check(b.Humanize(), Equals, "[AccumulatingBucket with 4 elements and 5 capacity] { 4.000000, 5.000000, 2.000000, 6.000000, }")
}
func (s *S) TestAccumulatingBucket(c *C) {
var b AccumulatingBucket = AccumulatingBucket{
elements: make(utility.PriorityQueue, 0, 10),
maximumSize: 5,
}
c.Check(b.elements, HasLen, 0)
c.Check(b.observations, Equals, 0)
c.Check(b.Observations(), Equals, 0)
b.Add(5.0)
c.Check(b.elements, HasLen, 1)
c.Check(b.observations, Equals, 1)
c.Check(b.Observations(), Equals, 1)
b.Add(6.0)
b.Add(7.0)
b.Add(8.0)
b.Add(9.0)
c.Check(b.elements, HasLen, 5)
c.Check(b.observations, Equals, 5)
c.Check(b.Observations(), Equals, 5)
}
func (s *S) TestAccumulatingBucketValueForIndex(c *C) {
var b AccumulatingBucket = AccumulatingBucket{
elements: make(utility.PriorityQueue, 0, 100),
maximumSize: 100,
evictionPolicy: EvictOldest(50),
}
for i := 0; i <= 100; i++ {
c.Assert(b.ValueForIndex(i), maths.IsNaN)
}
// The bucket has only observed one item and contains now one item.
b.Add(1.0)
c.Check(b.ValueForIndex(0), Equals, 1.0)
// Let's sanity check what occurs if presumably an eviction happened and
// we requested an index larger than what is contained.
c.Check(b.ValueForIndex(1), Equals, 1.0)
for i := 2.0; i <= 100; i += 1 {
b.Add(i)
// TODO(mtp): This is a sin. Provide a mechanism for deterministic testing.
time.Sleep(1 * time.Millisecond)
}
c.Check(b.ValueForIndex(0), Equals, 1.0)
c.Check(b.ValueForIndex(50), Equals, 51.0)
c.Check(b.ValueForIndex(99), Equals, 100.0)
c.Check(b.ValueForIndex(100), Equals, 100.0)
for i := 101.0; i <= 150; i += 1 {
b.Add(i)
// TODO(mtp): This is a sin. Provide a mechanism for deterministic testing.
time.Sleep(1 * time.Millisecond)
}
c.Check(b.ValueForIndex(0), Equals, 51.0)
c.Check(b.ValueForIndex(50), Equals, 101.0)
c.Check(b.ValueForIndex(99), Equals, 150.0)
c.Check(b.ValueForIndex(100), Equals, 150.0)
}

17
metrics/base.go Normal file

@@ -0,0 +1,17 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// base.go provides fundamental interface expectations for the various metrics.
package metrics
type Metric interface {
// Produce a human-consumable representation of the metric.
Humanize() string
// Produce a JSON-consumable representation of the metric.
// TODO(mtp):
Marshallable() map[string]interface{}
}

27
metrics/bucket.go Normal file

@@ -0,0 +1,27 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// bucket.go provides fundamental interface expectations for various bucket
// types.
package metrics
// The Histogram type and associated types build buckets on their own.
type BucketBuilder func() Bucket
// This defines the base Bucket type. The exact behaviors of the bucket are
// at the whim of the implementor.
type Bucket interface {
// Add a value to the bucket.
Add(value float64)
// Provide a humanized representation hereof.
Humanize() string
// Provide a count of observations throughout the bucket's lifetime.
Observations() int
// Provide the value from the given in-memory value cache or an estimate
// thereof.
ValueForIndex(index int) float64
}

51
metrics/eviction.go Normal file

@@ -0,0 +1,51 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// eviction.go provides several histogram bucket eviction strategies.
package metrics
import (
"container/heap"
"github.com/matttproud/golang_instrumentation/maths"
"github.com/matttproud/golang_instrumentation/utility"
"time"
)
// EvictionPolicy implements some sort of garbage collection methodology for
// an underlying heap.Interface. This is presently only used for
// AccumulatingBucket.
type EvictionPolicy func(h heap.Interface)
// As the name implies, this evicts the oldest x objects from the heap.
func EvictOldest(count int) EvictionPolicy {
return func(h heap.Interface) {
for i := 0; i < count; i++ {
heap.Pop(h)
}
}
}
// This factory produces an EvictionPolicy that applies some standardized
// reduction methodology on the to-be-terminated values.
//
// TODO(mtp): Parameterize the priority generation since these tools are useful.
func EvictAndReplaceWith(count int, reducer maths.ReductionMethod) EvictionPolicy {
return func(h heap.Interface) {
oldValues := make([]float64, count)
for i := 0; i < count; i++ {
oldValues[i] = heap.Pop(h).(*utility.Item).Value.(float64)
}
reduced := reducer(oldValues)
heap.Push(h, &utility.Item{
Value: reduced,
Priority: -1 * time.Now().UnixNano(),
})
}
}

183
metrics/eviction_test.go Normal file

@@ -0,0 +1,183 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// eviction_test.go provides a test complement for the eviction.go module.
package metrics
import (
"container/heap"
"github.com/matttproud/golang_instrumentation/maths"
"github.com/matttproud/golang_instrumentation/utility"
. "launchpad.net/gocheck"
)
func (s *S) TestEvictOldest(c *C) {
q := make(utility.PriorityQueue, 0, 10)
heap.Init(&q)
var e EvictionPolicy = EvictOldest(5)
for i := 0; i < 10; i++ {
var item utility.Item = utility.Item{
Value: float64(i),
Priority: int64(i),
}
heap.Push(&q, &item)
}
c.Check(q, HasLen, 10)
e(&q)
c.Check(q, HasLen, 5)
c.Check(heap.Pop(&q), utility.ValueEquals, 4.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 3.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 2.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 1.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 0.0)
}
// TODO(mtp): Extract reduction mechanisms into local variables.
func (s *S) TestEvictAndReplaceWithAverage(c *C) {
q := make(utility.PriorityQueue, 0, 10)
heap.Init(&q)
var e EvictionPolicy = EvictAndReplaceWith(5, maths.Average)
for i := 0; i < 10; i++ {
var item utility.Item = utility.Item{
Value: float64(i),
Priority: int64(i),
}
heap.Push(&q, &item)
}
c.Check(q, HasLen, 10)
e(&q)
c.Check(q, HasLen, 6)
c.Check(heap.Pop(&q), utility.ValueEquals, 4.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 3.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 2.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 1.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 0.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 7.0)
}
func (s *S) TestEvictAndReplaceWithMedian(c *C) {
q := make(utility.PriorityQueue, 0, 10)
heap.Init(&q)
var e EvictionPolicy = EvictAndReplaceWith(5, maths.Median)
for i := 0; i < 10; i++ {
var item utility.Item = utility.Item{
Value: float64(i),
Priority: int64(i),
}
heap.Push(&q, &item)
}
c.Check(q, HasLen, 10)
e(&q)
c.Check(q, HasLen, 6)
c.Check(heap.Pop(&q), utility.ValueEquals, 4.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 3.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 2.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 1.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 0.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 7.0)
}
func (s *S) TestEvictAndReplaceWithFirstMode(c *C) {
q := make(utility.PriorityQueue, 0, 10)
heap.Init(&q)
e := EvictAndReplaceWith(5, maths.FirstMode)
for i := 0; i < 10; i++ {
heap.Push(&q, &utility.Item{
Value: float64(i),
Priority: int64(i),
})
}
c.Check(q, HasLen, 10)
e(&q)
c.Check(q, HasLen, 6)
c.Check(heap.Pop(&q), utility.ValueEquals, 4.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 3.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 2.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 1.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 0.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 9.0)
}
func (s *S) TestEvictAndReplaceWithMinimum(c *C) {
q := make(utility.PriorityQueue, 0, 10)
heap.Init(&q)
var e EvictionPolicy = EvictAndReplaceWith(5, maths.Minimum)
for i := 0; i < 10; i++ {
var item utility.Item = utility.Item{
Value: float64(i),
Priority: int64(i),
}
heap.Push(&q, &item)
}
c.Check(q, HasLen, 10)
e(&q)
c.Check(q, HasLen, 6)
c.Check(heap.Pop(&q), utility.ValueEquals, 4.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 3.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 2.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 1.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 0.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 5.0)
}
func (s *S) TestEvictAndReplaceWithMaximum(c *C) {
q := make(utility.PriorityQueue, 0, 10)
heap.Init(&q)
var e EvictionPolicy = EvictAndReplaceWith(5, maths.Maximum)
for i := 0; i < 10; i++ {
var item utility.Item = utility.Item{
Value: float64(i),
Priority: int64(i),
}
heap.Push(&q, &item)
}
c.Check(q, HasLen, 10)
e(&q)
c.Check(q, HasLen, 6)
c.Check(heap.Pop(&q), utility.ValueEquals, 4.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 3.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 2.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 1.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 0.0)
c.Check(heap.Pop(&q), utility.ValueEquals, 9.0)
}

87
metrics/gauge.go Normal file

@@ -0,0 +1,87 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// gauge.go provides a scalar metric that one can monitor. It is useful for
// certain cases, such as instantaneous temperature.
package metrics
import (
"fmt"
"sync"
)
// A gauge metric merely provides an instantaneous representation of a scalar
// value or an accumulation. For instance, if one wants to expose the current
// temperature or the hitherto bandwidth used, this would be the metric for such
// circumstances.
type GaugeMetric struct {
value float64
mutex sync.RWMutex
}
func (metric *GaugeMetric) Humanize() string {
formatString := "[GaugeMetric; value=%f]"
metric.mutex.RLock()
defer metric.mutex.RUnlock()
return fmt.Sprintf(formatString, metric.value)
}
func (metric *GaugeMetric) Set(value float64) float64 {
metric.mutex.Lock()
defer metric.mutex.Unlock()
metric.value = value
return metric.value
}
func (metric *GaugeMetric) IncrementBy(value float64) float64 {
metric.mutex.Lock()
defer metric.mutex.Unlock()
metric.value += value
return metric.value
}
func (metric *GaugeMetric) Increment() float64 {
return metric.IncrementBy(1)
}
func (metric *GaugeMetric) DecrementBy(value float64) float64 {
metric.mutex.Lock()
defer metric.mutex.Unlock()
metric.value -= value
return metric.value
}
func (metric *GaugeMetric) Decrement() float64 {
return metric.DecrementBy(1)
}
func (metric *GaugeMetric) Get() float64 {
metric.mutex.RLock()
defer metric.mutex.RUnlock()
return metric.value
}
func (metric *GaugeMetric) Marshallable() map[string]interface{} {
metric.mutex.RLock()
defer metric.mutex.RUnlock()
v := make(map[string]interface{}, 2)
v["value"] = metric.value
v["type"] = "gauge"
return v
}

87
metrics/gauge_test.go Normal file

@@ -0,0 +1,87 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// gauge_test.go provides a test complement for the gauge.go module.
package metrics
import (
. "launchpad.net/gocheck"
)
func (s *S) TestCreate(c *C) {
m := GaugeMetric{value: 1.0}
c.Assert(m, Not(IsNil))
c.Check(m.Get(), Equals, 1.0)
}
func (s *S) TestHumanize(c *C) {
m := GaugeMetric{value: 2.0}
c.Check(m.Humanize(), Equals, "[GaugeMetric; value=2.000000]")
}
func (s *S) TestSet(c *C) {
m := GaugeMetric{value: -1.0}
m.Set(-99.0)
c.Check(m.Get(), Equals, -99.0)
}
func (s *S) TestIncrementBy(c *C) {
m := GaugeMetric{value: 1.0}
m.IncrementBy(1.5)
c.Check(m.Get(), Equals, 2.5)
c.Check(m.Humanize(), Equals, "[GaugeMetric; value=2.500000]")
}
func (s *S) TestIncrement(c *C) {
m := GaugeMetric{value: 1.0}
m.Increment()
c.Check(m.Get(), Equals, 2.0)
c.Check(m.Humanize(), Equals, "[GaugeMetric; value=2.000000]")
}
func (s *S) TestDecrementBy(c *C) {
m := GaugeMetric{value: 1.0}
m.DecrementBy(1.0)
c.Check(m.Get(), Equals, 0.0)
c.Check(m.Humanize(), Equals, "[GaugeMetric; value=0.000000]")
}
func (s *S) TestDecrement(c *C) {
m := GaugeMetric{value: 1.0}
m.Decrement()
c.Check(m.Get(), Equals, 0.0)
c.Check(m.Humanize(), Equals, "[GaugeMetric; value=0.000000]")
}
func (s *S) TestGaugeMetricMarshallable(c *C) {
m := GaugeMetric{value: 1.0}
returned := m.Marshallable()
c.Assert(returned, Not(IsNil))
c.Check(returned, HasLen, 2)
c.Check(returned["value"], Equals, 1.0)
c.Check(returned["type"], Equals, "gauge")
}
func (s *S) TestGaugeAsMetric(c *C) {
var metric Metric = &GaugeMetric{value: 1.0}
c.Assert(metric, Not(IsNil))
}

258
metrics/histogram.go Normal file

@@ -0,0 +1,258 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// histogram.go provides a basic histogram metric, which can accumulate scalar
// event values or samples. The underlying histogram implementation is designed
// to be performant in that it accepts tolerable inaccuracies.
// TODO(mtp): Implement visualization and exporting.
package metrics
import (
"bytes"
"fmt"
"math"
"strconv"
)
// This generates count-buckets of equal size distributed along the open
// interval of lower to upper. For instance, {lower=0, upper=10, count=5}
// yields the following: [0, 2, 4, 6, 8].
func EquallySizedBucketsFor(lower, upper float64, count int) []float64 {
buckets := make([]float64, count)
partitionSize := (upper - lower) / float64(count)
for i := 0; i < count; i++ {
m := float64(i)
buckets[i] = lower + (m * partitionSize)
}
return buckets
}
// This generates log2-sized buckets spanning from lower to upper inclusively
// as well as values beyond it.
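// For example, an upper bound of 2048 yields eleven bucket starts: 0, 2, 4, 8,
// ..., 512, 1024.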
func LogarithmicSizedBucketsFor(lower, upper float64) []float64 {
bucketCount := int(math.Ceil(math.Log2(upper)))
buckets := make([]float64, bucketCount)
for i, j := 0, 0.0; i < bucketCount; i, j = i+1, math.Pow(2, float64(i+1.0)) {
buckets[i] = j
}
return buckets
}
// A HistogramSpecification defines how a Histogram is to be built.
type HistogramSpecification struct {
Starts []float64
BucketMaker BucketBuilder
ReportablePercentiles []float64
}
// The histogram is an accumulator for samples. It merely routes each incoming
// event into the appropriate bucket and provides a percentile calculation
// mechanism.
//
// Histogram makes do without locking by employing the law of large numbers
// to presume a convergence toward a given bucket distribution. Locking
// may be implemented in the buckets themselves, though.
type Histogram struct {
// This represents the open interval's start at which values shall be added to
// the bucket. The interval continues until the beginning of the next bucket
// exclusive or positive infinity.
//
// N.B.
// - bucketStarts should be sorted in ascending order;
// - len(bucketStarts) must be equivalent to len(buckets);
// - The index of a given bucketStarts' element is presumed to correspond to
// the appropriate element in buckets.
bucketStarts []float64
// These are the buckets that capture samples as they are emitted to the
// histogram. Please consult the reference interface and its implements for
// further details about behavior expectations.
buckets []Bucket
// These are the percentile values that will be reported on marshalling.
reportablePercentiles []float64
}
func (h *Histogram) Add(value float64) {
lastIndex := 0
for i, bucketStart := range h.bucketStarts {
if value < bucketStart {
break
}
lastIndex = i
}
h.buckets[lastIndex].Add(value)
}
func (h *Histogram) Humanize() string {
stringBuffer := bytes.NewBufferString("")
stringBuffer.WriteString("[Histogram { ")
for i, bucketStart := range h.bucketStarts {
bucket := h.buckets[i]
stringBuffer.WriteString(fmt.Sprintf("[%f, inf) = %s, ", bucketStart, bucket.Humanize()))
}
stringBuffer.WriteString("}]")
return string(stringBuffer.Bytes())
}
// Find what bucket and element index contains a given percentile value.
// If a percentile is requested that results in a corresponding index that is no
// longer contained by the bucket, the index of the last item is returned. This
// may occur if the underlying bucket catalogs values and employs an eviction
// strategy.
func (h *Histogram) bucketForPercentile(percentile float64) (bucket *Bucket, index int) {
var totalObservations int = 0
for _, bucket := range h.buckets {
totalObservations += bucket.Observations()
}
expectedIndex := int(math.Floor(percentile * float64(totalObservations)))
var accumulatedObservations int = 0
var lastBucket Bucket = nil
var lastAccumulatedObservations int = 0
for _, bucket := range h.buckets {
if lastBucket == nil {
lastBucket = bucket
}
observations := bucket.Observations()
accumulatedObservations += observations
if observations == 0 {
continue
}
if accumulatedObservations > expectedIndex {
break
} else if accumulatedObservations == expectedIndex {
lastBucket = bucket
break
}
lastAccumulatedObservations = accumulatedObservations
lastBucket = bucket
}
var offset int
offset = int(expectedIndex - lastAccumulatedObservations)
if offset > 0 {
offset--
}
return &lastBucket, offset
}
func previousCumulativeObservations(cumulativeObservations []int, bucketIndex int) int {
if bucketIndex == 0 {
return 0
}
return cumulativeObservations[bucketIndex-1]
}
func prospectiveIndexForPercentile(percentile float64, totalObservations int) int {
return int(math.Floor(percentile * float64(totalObservations)))
}
func (h *Histogram) bucketForPercentile2(percentile float64) (bucket *Bucket, index int) {
bucketCount := len(h.buckets)
observationsByBucket := make([]int, bucketCount)
cumulativeObservationsByBucket := make([]int, bucketCount)
cumulativePercentagesByBucket := make([]float64, bucketCount)
var totalObservations int = 0
for i, bucket := range h.buckets {
observations := bucket.Observations()
observationsByBucket[i] = observations
totalObservations += bucket.Observations()
cumulativeObservationsByBucket[i] = totalObservations
}
for i := range h.buckets {
cumulativePercentagesByBucket[i] = float64(cumulativeObservationsByBucket[i]) / float64(totalObservations)
}
prospectiveIndex := prospectiveIndexForPercentile(percentile, totalObservations)
for i, cumulativeObservation := range cumulativeObservationsByBucket {
if cumulativeObservation == 0 {
continue
}
if cumulativeObservation >= prospectiveIndex {
var subIndex int
subIndex = prospectiveIndex - previousCumulativeObservations(cumulativeObservationsByBucket, i)
if observationsByBucket[i] == subIndex {
subIndex--
}
return &h.buckets[i], subIndex
}
}
return &h.buckets[0], 0
}
// Return the histogram's estimate of the value for a given percentile of
// collected samples. The requested percentile is expected to be a real
// value within (0, 1.0].
func (h *Histogram) Percentile(percentile float64) float64 {
bucket, index := h.bucketForPercentile2(percentile)
return (*bucket).ValueForIndex(index)
}
func (h *Histogram) Marshallable() map[string]interface{} {
numberOfPercentiles := len(h.reportablePercentiles)
result := make(map[string]interface{}, 2)
result["type"] = "histogram"
value := make(map[string]interface{}, numberOfPercentiles)
for _, percentile := range h.reportablePercentiles {
percentileString := strconv.FormatFloat(percentile, 'f', 6, 64)
value[percentileString] = strconv.FormatFloat(h.Percentile(percentile), 'f', 6, 64)
}
result["value"] = value
return result
}
// Produce a histogram from a given specification.
func CreateHistogram(specification *HistogramSpecification) *Histogram {
bucketCount := len(specification.Starts)
metric := &Histogram{
bucketStarts: specification.Starts,
buckets: make([]Bucket, bucketCount),
reportablePercentiles: specification.ReportablePercentiles,
}
for i := 0; i < bucketCount; i++ {
metric.buckets[i] = specification.BucketMaker()
}
return metric
}

974
metrics/histogram_test.go Normal file

@@ -0,0 +1,974 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// histogram_test.go provides a test complement for the histogram.go module.
package metrics
import (
"github.com/matttproud/golang_instrumentation/maths"
. "launchpad.net/gocheck"
)
func (s *S) TestEquallySizedBucketsFor(c *C) {
h := EquallySizedBucketsFor(0, 10, 5)
c.Assert(h, Not(IsNil))
c.Check(h, HasLen, 5)
c.Check(h[0], Equals, 0.0)
c.Check(h[1], Equals, 2.0)
c.Check(h[2], Equals, 4.0)
c.Check(h[3], Equals, 6.0)
c.Check(h[4], Equals, 8.0)
}
func (s *S) TestLogarithmicSizedBucketsFor(c *C) {
h := LogarithmicSizedBucketsFor(0, 2048)
c.Assert(h, Not(IsNil))
c.Check(h, HasLen, 11)
c.Check(h[0], Equals, 0.0)
c.Check(h[1], Equals, 2.0)
c.Check(h[2], Equals, 4.0)
c.Check(h[3], Equals, 8.0)
c.Check(h[4], Equals, 16.0)
c.Check(h[5], Equals, 32.0)
c.Check(h[6], Equals, 64.0)
c.Check(h[7], Equals, 128.0)
c.Check(h[8], Equals, 256.0)
c.Check(h[9], Equals, 512.0)
c.Check(h[10], Equals, 1024.0)
}
func (s *S) TestCreateHistogram(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 10, 5),
BucketMaker: TallyingBucketBuilder,
}
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
c.Check(h.Humanize(), Equals, "[Histogram { [0.000000, inf) = [TallyingBucket (Empty)], [2.000000, inf) = [TallyingBucket (Empty)], [4.000000, inf) = [TallyingBucket (Empty)], [6.000000, inf) = [TallyingBucket (Empty)], [8.000000, inf) = [TallyingBucket (Empty)], }]")
h.Add(1)
c.Check(h.Humanize(), Equals, "[Histogram { [0.000000, inf) = [TallyingBucket (1.000000, 1.000000); 1 items], [2.000000, inf) = [TallyingBucket (Empty)], [4.000000, inf) = [TallyingBucket (Empty)], [6.000000, inf) = [TallyingBucket (Empty)], [8.000000, inf) = [TallyingBucket (Empty)], }]")
}
func (s *S) TestBucketForPercentile(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 100, 100),
BucketMaker: TallyingBucketBuilder,
}
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
var bucket *Bucket = nil
var subindex int = 0
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(1.0)
bucket, subindex = h.bucketForPercentile(0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 0)
bucket, subindex = h.bucketForPercentile(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 0)
bucket, subindex = h.bucketForPercentile(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
for i := 2.0; i <= 100.0; i++ {
h.Add(i)
}
bucket, subindex = h.bucketForPercentile(0.05)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
for i := 0; i < 50; i++ {
h.Add(50)
h.Add(51)
}
bucket, subindex = h.bucketForPercentile(0.50)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 50)
c.Check((*bucket).Observations(), Equals, 51)
bucket, subindex = h.bucketForPercentile(0.51)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 51)
}
func (s *S) TestBucketForPercentileSingleton(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 3, 3),
BucketMaker: TallyingBucketBuilder,
}
var h *Histogram = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
var bucket *Bucket = nil
var subindex int = 0
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(0.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
h = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(1.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
h = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(2.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
}
func (s *S) TestBucketForPercentileDoubleInSingleBucket(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 3, 3),
BucketMaker: TallyingBucketBuilder,
}
var h *Histogram = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
var bucket *Bucket = nil
var subindex int = 0
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(0.0)
h.Add(0.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 2)
h = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(1.0)
h.Add(1.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 2)
h = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(2.0)
h.Add(2.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 2)
}
func (s *S) TestBucketForPercentileTripleInSingleBucket(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 3, 3),
BucketMaker: TallyingBucketBuilder,
}
var h *Histogram = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
var bucket *Bucket = nil
var subindex int = 0
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(0.0)
h.Add(0.0)
h.Add(0.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 3)
h = CreateHistogram(hs)
h.Add(1.0)
h.Add(1.0)
h.Add(1.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 3)
h = CreateHistogram(hs)
h.Add(2.0)
h.Add(2.0)
h.Add(2.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 2)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 3)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 3)
}
func (s *S) TestBucketForPercentileTwoEqualAdjacencies(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 3, 3),
BucketMaker: TallyingBucketBuilder,
}
var h *Histogram = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
var bucket *Bucket = nil
var subindex int = 0
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(0.0)
h.Add(1.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
h = CreateHistogram(hs)
h.Add(1.0)
h.Add(2.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
}
func (s *S) TestBucketForPercentileTwoAdjacenciesUnequal(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 3, 3),
BucketMaker: TallyingBucketBuilder,
}
var h *Histogram = CreateHistogram(hs)
c.Assert(h, Not(IsNil))
var bucket *Bucket = nil
var subindex int = 0
for i := 0.0; i < 1.0; i += 0.01 {
bucket, subindex := h.bucketForPercentile2(i)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
}
h.Add(0.0)
h.Add(0.0)
h.Add(1.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 2)
h = CreateHistogram(hs)
h.Add(0.0)
h.Add(1.0)
h.Add(1.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
h = CreateHistogram(hs)
h.Add(1.0)
h.Add(1.0)
h.Add(2.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 2)
h = CreateHistogram(hs)
h.Add(1.0)
h.Add(2.0)
h.Add(2.0)
bucket, subindex = h.bucketForPercentile2(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.67)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(2.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 1)
c.Check((*bucket).Observations(), Equals, 2)
bucket, subindex = h.bucketForPercentile2(0.5)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(1.0 / 3.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile2(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
}
func (s *S) TestBucketForPercentileWithBinomialApproximation(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 5, 6),
BucketMaker: TallyingBucketBuilder,
}
c.Assert(hs, Not(IsNil))
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
n := 5
p := 0.5
for k := 0; k < 6; k++ {
limit := 1000000.0 * maths.BinomialPDF(k, n, p)
for j := 0.0; j < limit; j++ {
h.Add(float64(k))
}
}
var bucket *Bucket = nil
var subindex int = 0
bucket, subindex = h.bucketForPercentile(0.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 31250)
bucket, subindex = h.bucketForPercentile(0.03125)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 31249)
c.Check((*bucket).Observations(), Equals, 31250)
bucket, subindex = h.bucketForPercentile(0.1875)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 156249)
c.Check((*bucket).Observations(), Equals, 156250)
bucket, subindex = h.bucketForPercentile(0.50)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 312499)
c.Check((*bucket).Observations(), Equals, 312500)
bucket, subindex = h.bucketForPercentile(0.8125)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 312499)
c.Check((*bucket).Observations(), Equals, 312500)
bucket, subindex = h.bucketForPercentile(0.96875)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 156249)
c.Check((*bucket).Observations(), Equals, 156250)
bucket, subindex = h.bucketForPercentile(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 31249)
c.Check((*bucket).Observations(), Equals, 31250)
}
func (s *S) TestBucketForPercentileWithUniform(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 100, 100),
BucketMaker: TallyingBucketBuilder,
}
c.Assert(hs, Not(IsNil))
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
for i := 0.0; i <= 99.0; i++ {
h.Add(i)
}
for i := 0; i <= 99; i++ {
c.Check(h.bucketStarts[i], Equals, float64(i))
}
for i := 1; i <= 100; i++ {
c.Check(h.buckets[i-1].Observations(), Equals, 1)
}
var bucket *Bucket = nil
var subindex int = 0
bucket, subindex = h.bucketForPercentile(0.01)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
bucket, subindex = h.bucketForPercentile(1.0)
c.Assert(*bucket, Not(IsNil))
c.Check(subindex, Equals, 0)
c.Check((*bucket).Observations(), Equals, 1)
}
func (s *S) TestHistogramPercentileUniform(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 100, 100),
BucketMaker: TallyingBucketBuilder,
}
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
for i := 0.0; i <= 99.0; i++ {
h.Add(i)
}
c.Check(h.Percentile(0.01), Equals, 0.0)
c.Check(h.Percentile(0.49), Equals, 48.0)
c.Check(h.Percentile(0.50), Equals, 49.0)
c.Check(h.Percentile(0.51), Equals, 50.0)
c.Check(h.Percentile(1.0), Equals, 99.0)
}
func (s *S) TestHistogramPercentileBinomialApproximation(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 5, 6),
BucketMaker: TallyingBucketBuilder,
}
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
n := 5
p := 0.5
for k := 0; k < 6; k++ {
limit := 1000000.0 * maths.BinomialPDF(k, n, p)
for j := 0.0; j < limit; j++ {
h.Add(float64(k))
}
}
c.Check(h.Percentile(0.0), Equals, 0.0)
c.Check(h.Percentile(0.03125), Equals, 0.0)
c.Check(h.Percentile(0.1875), Equals, 1.0)
c.Check(h.Percentile(0.5), Equals, 2.0)
c.Check(h.Percentile(0.8125), Equals, 3.0)
c.Check(h.Percentile(0.96875), Equals, 4.0)
c.Check(h.Percentile(1.0), Equals, 5.0)
}
func (s *S) TestHistogramMarshallable(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 5, 6),
BucketMaker: TallyingBucketBuilder,
ReportablePercentiles: []float64{0.03125, 0.1875, 0.5, 0.8125, 0.96875, 1.0},
}
h := CreateHistogram(hs)
c.Assert(h, Not(IsNil))
n := 5
p := 0.5
for k := 0; k < 6; k++ {
limit := 1000000.0 * maths.BinomialPDF(k, n, p)
for j := 0.0; j < limit; j++ {
h.Add(float64(k))
}
}
m := h.Marshallable()
c.Assert(m, Not(IsNil))
c.Check(m, HasLen, 2)
c.Check(m["type"], Equals, "histogram")
var v map[string]interface{} = m["value"].(map[string]interface{})
c.Assert(v, Not(IsNil))
c.Check(v, HasLen, 6)
c.Check(v["0.031250"], Equals, "0.000000")
c.Check(v["0.187500"], Equals, "1.000000")
c.Check(v["0.500000"], Equals, "2.000000")
c.Check(v["0.812500"], Equals, "3.000000")
c.Check(v["0.968750"], Equals, "4.000000")
c.Check(v["1.000000"], Equals, "5.000000")
}
func (s *S) TestHistogramAsMetric(c *C) {
hs := &HistogramSpecification{
Starts: EquallySizedBucketsFor(0, 5, 6),
BucketMaker: TallyingBucketBuilder,
ReportablePercentiles: []float64{0.0, 0.03125, 0.1875, 0.5, 0.8125, 0.96875, 1.0},
}
h := CreateHistogram(hs)
var metric Metric = h
c.Assert(metric, Not(IsNil))
}
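
The suite above exercises histogram construction, percentile estimation, and marshalling end to end. As a rough usage sketch of the same API for a consumer of this package (illustrative only; the import path is an assumption inferred from the maths import used elsewhere in this commit):

package main

import (
    "fmt"

    "github.com/matttproud/golang_instrumentation/metrics"
)

func main() {
    // Ten equally sized buckets covering [0, 100), each one a TallyingBucket.
    hs := &metrics.HistogramSpecification{
        Starts:                metrics.EquallySizedBucketsFor(0, 100, 10),
        BucketMaker:           metrics.TallyingBucketBuilder,
        ReportablePercentiles: []float64{0.5, 0.9, 0.99},
    }
    h := metrics.CreateHistogram(hs)
    for i := 0.0; i < 100.0; i++ {
        h.Add(i)
    }
    // Ask for an estimate of the median, as the tests above do.
    fmt.Println(h.Percentile(0.5))
}

EquallySizedBucketsFor(0, 100, 10) yields ten bucket starts at 0, 10, ..., 90, mirroring the spacing the tests above rely upon.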

23
metrics/metrics_test.go Normal file

@ -0,0 +1,23 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// metrics_test.go provides a test suite for all tests in the metrics package
// hierarchy. It employs the gocheck framework for test scaffolding.
package metrics
import (
. "launchpad.net/gocheck"
"testing"
)
type S struct{}
var _ = Suite(&S{})
func TestMetrics(t *testing.T) {
TestingT(t)
}

143
metrics/tallying_bucket.go Normal file

@ -0,0 +1,143 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// tallying_bucket.go provides a histogram bucket type that tallies the events
// falling into its range rather than retaining the sampled values themselves.
package metrics
import (
"fmt"
"github.com/matttproud/golang_instrumentation/maths"
"math"
"sync"
)
const (
lowerThird = 100.0 / 3.0
upperThird = 2.0 * (100.0 / 3.0)
)
// A TallyingIndexEstimator is responsible for estimating the value at a given
// index of a TallyingBucket, even though a TallyingBucket does not possess a
// collection of samples. A few strategies for how this value should be
// approximated are listed below.
type TallyingIndexEstimator func(minimum, maximum float64, index, observations int) float64
// Provide a filter for handling empty buckets.
func emptyFilter(e TallyingIndexEstimator) TallyingIndexEstimator {
return func(minimum, maximum float64, index, observations int) float64 {
if observations == 0 {
return math.NaN()
}
return e(minimum, maximum, index, observations)
}
}
// Report the smallest observed value in the bucket.
var Minimum TallyingIndexEstimator = emptyFilter(func(minimum, maximum float64, _, observations int) float64 {
return minimum
})
// Report the largest observed value in the bucket.
var Maximum TallyingIndexEstimator = emptyFilter(func(minimum, maximum float64, _, observations int) float64 {
return maximum
})
// Report the average of the extrema.
var Average TallyingIndexEstimator = emptyFilter(func(minimum, maximum float64, _, observations int) float64 {
return maths.Average([]float64{minimum, maximum})
})
// Report the minimum value if the index is in the lower third of observations,
// the average if it is in the middle third, and the maximum if it is in the
// upper third.
var Uniform TallyingIndexEstimator = emptyFilter(func(minimum, maximum float64, index, observations int) float64 {
if observations == 1 {
return minimum
}
location := float64(index) / float64(observations)
if location > upperThird {
return maximum
} else if location < lowerThird {
return minimum
}
return maths.Average([]float64{minimum, maximum})
})
// A TallyingBucket is a Bucket that tallies when an object is added to it.
// Upon insertion, an object is compared against collected extrema and noted
// as a new minimum or maximum if appropriate.
type TallyingBucket struct {
observations int
smallestObserved float64
largestObserved float64
mutex sync.RWMutex
estimator TallyingIndexEstimator
}
func (b *TallyingBucket) Add(value float64) {
b.mutex.Lock()
defer b.mutex.Unlock()
b.observations += 1
b.smallestObserved = math.Min(value, b.smallestObserved)
b.largestObserved = math.Max(value, b.largestObserved)
}
func (b *TallyingBucket) Humanize() string {
b.mutex.RLock()
defer b.mutex.RUnlock()
observations := b.observations
if observations == 0 {
return "[TallyingBucket (Empty)]"
}
return fmt.Sprintf("[TallyingBucket (%f, %f); %d items]", b.smallestObserved, b.largestObserved, observations)
}
func (b *TallyingBucket) Observations() int {
b.mutex.RLock()
defer b.mutex.RUnlock()
return b.observations
}
func (b *TallyingBucket) ValueForIndex(index int) float64 {
b.mutex.RLock()
defer b.mutex.RUnlock()
return b.estimator(b.smallestObserved, b.largestObserved, index, b.observations)
}
// Produce a TallyingBucket with sane defaults.
func DefaultTallyingBucket() TallyingBucket {
return TallyingBucket{
smallestObserved: math.MaxFloat64,
largestObserved: math.SmallestNonzeroFloat64,
estimator: Minimum,
}
}
func CustomTallyingBucket(estimator TallyingIndexEstimator) TallyingBucket {
return TallyingBucket{
smallestObserved: math.MaxFloat64,
largestObserved: math.SmallestNonzeroFloat64,
estimator: estimator,
}
}
// This is used strictly for testing.
func TallyingBucketBuilder() Bucket {
b := DefaultTallyingBucket()
return &b
}
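
As the TallyingIndexEstimator comment above explains, a TallyingBucket retains only its extrema and an observation count, so any per-index value is an estimate. A brief illustrative sketch (not part of this commit) of supplying a non-default estimator, here the package's own Average:

package metrics

// Illustrative sketch only, not part of this commit.
func exampleCustomTallyingBucket() float64 {
    b := CustomTallyingBucket(Average)
    b.Add(1.0)
    b.Add(9.0)
    // Only the extrema are retained, so the Average strategy estimates any
    // index as their mean: 5.0 here.
    return b.ValueForIndex(0)
}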

84
metrics/tallying_bucket_test.go Normal file

@ -0,0 +1,84 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// tallying_bucket_test.go provides a test complement for the
// tallying_bucket.go module.
package metrics
import (
"github.com/matttproud/golang_instrumentation/maths"
. "launchpad.net/gocheck"
)
func (s *S) TestTallyingPercentileEstimatorMinimum(c *C) {
c.Assert(Minimum(-2, -1, 0, 0), maths.IsNaN)
c.Check(Minimum(-2, -1, 0, 1), Equals, -2.0)
}
func (s *S) TestTallyingPercentileEstimatorMaximum(c *C) {
c.Assert(Maximum(-2, -1, 0, 0), maths.IsNaN)
c.Check(Maximum(-2, -1, 0, 1), Equals, -1.0)
}
func (s *S) TestTallyingPercentilesEstimatorAverage(c *C) {
c.Assert(Average(-2, -1, 0, 0), maths.IsNaN)
c.Check(Average(-2, -2, 0, 1), Equals, -2.0)
c.Check(Average(-1, -1, 0, 1), Equals, -1.0)
c.Check(Average(1, 1, 0, 2), Equals, 1.0)
c.Check(Average(2, 1, 0, 2), Equals, 1.5)
}
func (s *S) TestTallyingPercentilesEstimatorUniform(c *C) {
c.Assert(Uniform(-5, 5, 0, 0), maths.IsNaN)
// TODO(mtp): Rewrite.
// for i := 0.0; i < 33.3; i += 0.1 {
// c.Check(Uniform(-5, 5, i, 2), Equals, -5.0)
// }
// for i := 33.4; i < 66.0; i += 0.1 {
// c.Check(Uniform(-5, 5, i, 2), Equals, 0.0)
// }
// for i := 66.7; i < 100.0; i += 0.1 {
// c.Check(Uniform(-5, 5, i, 2), Equals, 5.0)
// }
}
func (s *S) TestTallyingBucketBuilder(c *C) {
var bucket Bucket = TallyingBucketBuilder()
c.Assert(bucket, Not(IsNil))
}
func (s *S) TestTallyingBucketHumanize(c *C) {
bucket := TallyingBucket{
observations: 3,
smallestObserved: 2.0,
largestObserved: 5.5,
}
c.Check(bucket.Humanize(), Equals, "[TallyingBucket (2.000000, 5.500000); 3 items]")
}
func (s *S) TestTallyingBucketAdd(c *C) {
b := DefaultTallyingBucket()
b.Add(1)
c.Check(b.observations, Equals, 1)
c.Check(b.Observations(), Equals, 1)
c.Check(b.smallestObserved, Equals, 1.0)
c.Check(b.largestObserved, Equals, 1.0)
b.Add(2)
c.Check(b.observations, Equals, 2)
c.Check(b.Observations(), Equals, 2)
c.Check(b.smallestObserved, Equals, 1.0)
c.Check(b.largestObserved, Equals, 2.0)
}

55
metrics/timer.go Normal file

@ -0,0 +1,55 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// timer.go provides a scalar metric that times how long a given event takes.
package metrics
import (
"time"
)
// This callback is called upon the completion of the timer—i.e., when it stops.
type CompletionCallback func(duration time.Duration)
// This is meant to capture a function whose execution can be timed with a
// StopWatch for purposes of instrumentation.
type InstrumentableCall func()
type StopWatch struct {
startTime time.Time
endTime time.Time
onCompletion CompletionCallback
}
// Return a new StopWatch that is ready for instrumentation.
func Start(onCompletion CompletionCallback) *StopWatch {
return &StopWatch{
startTime: time.Now(),
onCompletion: onCompletion,
}
}
// Stop the StopWatch, returning the elapsed duration of its lifetime, while
// firing an optional CompletionCallback in the background.
func (s *StopWatch) Stop() time.Duration {
s.endTime = time.Now()
duration := s.endTime.Sub(s.startTime)
if s.onCompletion != nil {
go s.onCompletion(duration)
}
return duration
}
// Provide a quick way of instrumenting an InstrumentableCall and emitting its
// duration.
func InstrumentCall(instrumentable InstrumentableCall, onCompletion CompletionCallback) time.Duration {
s := Start(onCompletion)
instrumentable()
return s.Stop()
}
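
The two usage styles this file offers, explicit Start/Stop and the InstrumentCall wrapper, might be exercised as in the following illustrative sketch; doWork and the log line are hypothetical stand-ins and not part of this commit:

package metrics

import (
    "log"
    "time"
)

// doWork stands in for any unit of work worth timing (illustrative only).
func doWork() {}

func exampleTiming() {
    var onDone CompletionCallback = func(d time.Duration) {
        log.Printf("call took %s", d)
    }

    // Explicit style: bracket the work with Start and Stop.
    sw := Start(onDone)
    doWork()
    sw.Stop()

    // Convenience style: wrap the work as an InstrumentableCall.
    InstrumentCall(doWork, onDone)
}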

68
metrics/timer_test.go Normal file

@ -0,0 +1,68 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// timer_test.go provides a test complement for the timer.go module.
package metrics
import (
. "launchpad.net/gocheck"
"time"
)
func (s *S) TestTimerStart(c *C) {
stopWatch := Start(nil)
c.Assert(stopWatch, Not(IsNil))
c.Assert(stopWatch.startTime, Not(IsNil))
}
func (s *S) TestTimerStop(c *C) {
done := make(chan bool)
var callbackInvoked bool = false
var complete CompletionCallback = func(duration time.Duration) {
callbackInvoked = true
done <- true
}
stopWatch := Start(complete)
c.Check(callbackInvoked, Equals, false)
d := stopWatch.Stop()
<-done
c.Assert(d, Not(IsNil))
c.Check(callbackInvoked, Equals, true)
}
func (s *S) TestInstrumentCall(c *C) {
var callbackInvoked bool = false
var instrumentableInvoked bool = false
done := make(chan bool, 2)
var complete CompletionCallback = func(duration time.Duration) {
callbackInvoked = true
done <- true
}
var instrumentable InstrumentableCall = func() {
instrumentableInvoked = true
done <- true
}
d := InstrumentCall(instrumentable, complete)
c.Assert(d, Not(IsNil))
<-done
<-done
c.Check(instrumentableInvoked, Equals, true)
c.Check(callbackInvoked, Equals, true)
}

45
utility/optional.go Normal file

@ -0,0 +1,45 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// optional.go provides a mechanism for safely getting a set value or falling
// back to defaults.
package utility
type Optional struct {
value interface{}
}
func EmptyOptional() *Optional {
emission := &Optional{value: nil}
return emission
}
func Of(value interface{}) *Optional {
emission := &Optional{value: value}
return emission
}
func (o *Optional) IsSet() bool {
return o.value != nil
}
func (o *Optional) Get() interface{} {
if o.value == nil {
panic("Expected a value to be set.")
}
return o.value
}
func (o *Optional) Or(a interface{}) interface{} {
if o.IsSet() {
return o.Get()
}
return a
}
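
A brief illustrative sketch (not part of this commit) of the get-or-default behaviour described above:

package utility

// Illustrative sketch only, not part of this commit.
func exampleOptional() (interface{}, interface{}) {
    unset := EmptyOptional()
    set := Of(45)
    // Or yields the supplied default for an empty Optional and the held value
    // otherwise: 30 and 45 here.
    return unset.Or(30), set.Or(30)
}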

30
utility/optional_test.go Normal file

@ -0,0 +1,30 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// optional_test.go provides a test complement for the optional.go module.
package utility
import (
. "launchpad.net/gocheck"
)
func (s *S) TestEmptyOptional(c *C) {
var o *Optional = EmptyOptional()
c.Assert(o, Not(IsNil))
c.Check(o.IsSet(), Equals, false)
c.Assert("default", Equals, o.Or("default"))
}
func (s *S) TestOf(c *C) {
var o *Optional = Of(1)
c.Assert(o, Not(IsNil))
c.Check(o.IsSet(), Equals, true)
c.Check(o.Get(), Equals, 1)
c.Check(o.Or(2), Equals, 1)
}

50
utility/priority_queue.go Normal file

@ -0,0 +1,50 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// priority_queue.go provides a simple priority queue.
package utility
type Item struct {
Value interface{}
Priority int64
index int
}
type PriorityQueue []*Item
func (q PriorityQueue) Len() int {
return len(q)
}
func (q PriorityQueue) Less(i, j int) bool {
return q[i].Priority > q[j].Priority
}
func (q PriorityQueue) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
q[i].index = i
q[j].index = j
}
func (q *PriorityQueue) Push(x interface{}) {
queue := *q
size := len(queue)
queue = queue[0 : size+1]
item := x.(*Item)
item.index = size
queue[size] = item
*q = queue
}
func (q *PriorityQueue) Pop() interface{} {
queue := *q
size := len(queue)
item := queue[size-1]
item.index = -1
*q = queue[0 : size-1]
return item
}
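
Two details are worth noting: Push grows the queue by reslicing in place, so a PriorityQueue must be created with enough capacity for everything pushed onto it, and Less orders by descending Priority, so the highest-priority Item pops first. An illustrative sketch (not part of this commit) of driving the queue through container/heap:

package utility

import "container/heap"

// Illustrative sketch only, not part of this commit.
func examplePriorityQueue() interface{} {
    // Capacity must cover every element that will be pushed.
    q := make(PriorityQueue, 0, 2)
    heap.Push(&q, &Item{Value: "low", Priority: 1})
    heap.Push(&q, &Item{Value: "high", Priority: 2})
    // Less prefers larger Priority values, so the higher-priority Item pops first.
    return heap.Pop(&q).(*Item).Value
}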

37
utility/priority_queue_test.go Normal file

@ -0,0 +1,37 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// priority_queue_test.go provides a test complement for the priority_queue.go
// module.
package utility
import (
"container/heap"
. "launchpad.net/gocheck"
)
func (s *S) TestPriorityQueueSort(c *C) {
q := make(PriorityQueue, 0, 6)
c.Check(len(q), Equals, 0)
heap.Push(&q, &Item{Value: "newest", Priority: -100})
heap.Push(&q, &Item{Value: "older", Priority: 90})
heap.Push(&q, &Item{Value: "oldest", Priority: 100})
heap.Push(&q, &Item{Value: "newer", Priority: -90})
heap.Push(&q, &Item{Value: "new", Priority: -80})
heap.Push(&q, &Item{Value: "old", Priority: 80})
c.Check(len(q), Equals, 6)
c.Check(heap.Pop(&q), ValueEquals, "oldest")
c.Check(heap.Pop(&q), ValueEquals, "older")
c.Check(heap.Pop(&q), ValueEquals, "old")
c.Check(heap.Pop(&q), ValueEquals, "new")
c.Check(heap.Pop(&q), ValueEquals, "newer")
c.Check(heap.Pop(&q), ValueEquals, "newest")
}

29
utility/test_helper.go Normal file

@ -0,0 +1,29 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// test_helper.go provides testing assistants for this package and its
// dependents.
package utility
import (
. "launchpad.net/gocheck"
)
type valueEqualsChecker struct {
*CheckerInfo
}
var ValueEquals Checker = &valueEqualsChecker{
&CheckerInfo{Name: "ValueEquals", Params: []string{"obtained", "expected"}},
}
func (checker *valueEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
actual := params[0].(*Item).Value
expected := params[1]
return actual == expected, ""
}

23
utility/utility_test.go Normal file

@ -0,0 +1,23 @@
// Copyright (c) 2012, Matt T. Proud
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// utility_test.go provides a test suite for all tests in the utility package
// hierarchy. It employs the gocheck framework for test scaffolding.
package utility
import (
. "launchpad.net/gocheck"
"testing"
)
type S struct{}
var _ = Suite(&S{})
func TestUtility(t *testing.T) {
TestingT(t)
}