Update the vendoring of bmizerany/perks/quantile.

This is still vendored from my own fork, as there are critical PRs that
bmizerany has not yet responded to.

Change-Id: Ib46e68ec9ecbae9423b1bfe1311569426dfc7ba0
Bjoern Rabenstein 2014-12-05 13:50:14 +01:00
parent 115bde9c47
commit 5ae6e57c4c
6 changed files with 43 additions and 102 deletions

View File

@@ -1 +1 @@
-Imported at da3e0acc8525a74a0ac8651ac5e7a68891291fdf from https://github.com/u-c-l/perks/tree/opt/pool-for-sample .
+Imported at 5d903d2c5dc7f55829e36c62ae6c5f5f6d75e70a from https://github.com/u-c-l/perks .

View File

@@ -5,6 +5,8 @@ import (
 )
 
 func BenchmarkInsertTargeted(b *testing.B) {
+	b.ReportAllocs()
 	s := NewTargeted(0.01, 0.5, 0.9, 0.99)
 	b.ResetTimer()
 	for i := float64(0); i < float64(b.N); i++ {
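For context: b.ReportAllocs() makes `go test -bench` print allocation statistics (allocs/op and B/op) for this benchmark alone, without needing the global -benchmem flag. A minimal sketch of the pattern, not part of the commit, using a hypothetical benchmark name and the NewTargeted/Insert API shown in this diff:

package quantile

import "testing"

// Sketch of the ReportAllocs pattern added above: allocation statistics are
// reported for this benchmark only, and ResetTimer keeps the setup cost of
// NewTargeted out of the measured loop.
func BenchmarkInsertSketch(b *testing.B) {
	b.ReportAllocs()
	s := NewTargeted(0.5, 0.9, 0.99)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}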

View File

@@ -1,28 +0,0 @@
-//+build !go1.3
-
-package quantile
-
-type samplePool struct {
-	pool chan *Sample
-}
-
-func newSamplePool(capacity int) *samplePool {
-	return &samplePool{pool: make(chan *Sample, capacity)}
-}
-
-func (sp *samplePool) Get(value, width, delta float64) *Sample {
-	select {
-	case sample := <-sp.pool:
-		sample.Value, sample.Width, sample.Delta = value, width, delta
-		return sample
-	default:
-		return &Sample{value, width, delta}
-	}
-}
-
-func (sp *samplePool) Put(sample *Sample) {
-	select {
-	case sp.pool <- sample:
-	default:
-	}
-}

View File

@@ -1,26 +0,0 @@
-//+build go1.3
-
-package quantile
-
-import "sync"
-
-// With the Go1.3 sync Pool, there is no max capacity, and a globally shared
-// pool is more efficient.
-var globalSamplePool = sync.Pool{New: func() interface{} { return &Sample{} }}
-
-type samplePool struct{}
-
-func newSamplePool(capacity int) *samplePool {
-	// capacity ignored for Go1.3 sync.Pool.
-	return &samplePool{}
-}
-
-func (_ samplePool) Get(value, width, delta float64) *Sample {
-	sample := globalSamplePool.Get().(*Sample)
-	sample.Value, sample.Width, sample.Delta = value, width, delta
-	return sample
-}
-
-func (_ samplePool) Put(sample *Sample) {
-	globalSamplePool.Put(sample)
-}

View File

@@ -80,11 +80,7 @@ type Stream struct {
 func newStream(ƒ invariant) *Stream {
 	const defaultEpsilon = 0.01
-	x := &stream{
-		epsilon: defaultEpsilon,
-		ƒ:       ƒ,
-		pool:    newSamplePool(1024),
-	}
+	x := &stream{epsilon: defaultEpsilon, ƒ: ƒ}
 	return &Stream{x, make(Samples, 0, 500), true}
 }
@@ -173,9 +169,8 @@ func (s *Stream) flushed() bool {
 type stream struct {
 	epsilon float64
 	n       float64
-	l       []*Sample
+	l       []Sample
 	ƒ       invariant
-	pool    *samplePool
 }
 
 // SetEpsilon sets the error epsilon for the Stream. The default epsilon is
@@ -187,9 +182,6 @@ func (s *stream) SetEpsilon(epsilon float64) {
 }
 
 func (s *stream) reset() {
-	for _, sample := range s.l {
-		s.pool.Put(sample)
-	}
 	s.l = s.l[:0]
 	s.n = 0
 }
@@ -206,15 +198,15 @@ func (s *stream) merge(samples Samples) {
 			c := s.l[i]
 			if c.Value > sample.Value {
 				// Insert at position i.
-				s.l = append(s.l, nil)
+				s.l = append(s.l, Sample{})
 				copy(s.l[i+1:], s.l[i:])
-				s.l[i] = s.pool.Get(sample.Value, sample.Width, math.Floor(s.ƒ(s, r))-1)
+				s.l[i] = Sample{sample.Value, sample.Width, math.Floor(s.ƒ(s, r)) - 1}
 				i++
 				goto inserted
 			}
 			r += c.Width
 		}
-		s.l = append(s.l, s.pool.Get(sample.Value, sample.Width, 0))
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
 		i++
 	inserted:
 		s.n += sample.Width
@@ -245,19 +237,21 @@ func (s *stream) compress() {
 		return
 	}
 	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
 	r := s.n - 1 - x.Width
 	for i := len(s.l) - 2; i >= 0; i-- {
 		c := s.l[i]
 		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
 			x.Width += c.Width
+			s.l[xi] = x
 			// Remove element at i.
 			copy(s.l[i:], s.l[i+1:])
-			s.l[len(s.l)-1] = nil
 			s.l = s.l[:len(s.l)-1]
-			s.pool.Put(c)
+			xi -= 1
 		} else {
 			x = c
+			xi = i
 		}
 		r -= c.Width
 	}
@@ -265,8 +259,6 @@ func (s *stream) compress() {
 
 func (s *stream) samples() Samples {
 	samples := make(Samples, len(s.l))
-	for i, c := range s.l {
-		samples[i] = *c
-	}
+	copy(samples, s.l)
 	return samples
 }
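The core change in this file is that the stream now stores Sample values directly in a []Sample instead of pooled *Sample pointers, which is what allows the two samplePool files above to be deleted: inserts overwrite slots in place and append reuses the slice's backing array once it has grown. A minimal standalone sketch of that insert-by-value idiom, not part of the commit, with a simplified element type standing in for quantile.Sample:

package main

import "fmt"

// sample is a simplified stand-in for quantile.Sample.
type sample struct{ Value, Width, Delta float64 }

// insertAt mirrors the append/copy idiom the new merge code uses: grow the
// slice by one zero value, shift the tail up, and write the new element in
// place. Once the backing array has enough capacity there is no per-element
// heap allocation, which is why the explicit pool is no longer needed.
func insertAt(l []sample, i int, s sample) []sample {
	l = append(l, sample{})
	copy(l[i+1:], l[i:])
	l[i] = s
	return l
}

func main() {
	l := []sample{{Value: 1}, {Value: 3}}
	l = insertAt(l, 1, sample{Value: 2})
	fmt.Println(l) // [{1 0 0} {2 0 0} {3 0 0}]
}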

View File

@@ -1,7 +1,6 @@
 package quantile
 
 import (
-	"math"
 	"math/rand"
 	"sort"
 	"testing"
@@ -12,26 +11,23 @@ func TestQuantRandQuery(t *testing.T) {
 	a := make([]float64, 0, 1e5)
 	rand.Seed(42)
 	for i := 0; i < cap(a); i++ {
-		v := float64(rand.Int63())
+		v := rand.NormFloat64()
 		s.Insert(v)
 		a = append(a, v)
 	}
 	t.Logf("len: %d", s.Count())
 	sort.Float64s(a)
-	w := getPerc(a, 0.50)
-	if g := s.Query(0.50); math.Abs(w-g)/w > 0.03 {
-		t.Errorf("perc50: want %v, got %v", w, g)
-		t.Logf("e: %f", math.Abs(w-g)/w)
+	w, min, max := getPerc(a, 0.50)
+	if g := s.Query(0.50); g < min || g > max {
+		t.Errorf("perc50: want %v [%f,%f], got %v", w, min, max, g)
 	}
-	w = getPerc(a, 0.90)
-	if g := s.Query(0.90); math.Abs(w-g)/w > 0.03 {
-		t.Errorf("perc90: want %v, got %v", w, g)
-		t.Logf("e: %f", math.Abs(w-g)/w)
+	w, min, max = getPerc(a, 0.90)
+	if g := s.Query(0.90); g < min || g > max {
+		t.Errorf("perc90: want %v [%f,%f], got %v", w, min, max, g)
 	}
-	w = getPerc(a, 0.99)
-	if g := s.Query(0.99); math.Abs(w-g)/w > 0.03 {
-		t.Errorf("perc99: want %v, got %v", w, g)
-		t.Logf("e: %f", math.Abs(w-g)/w)
+	w, min, max = getPerc(a, 0.99)
+	if g := s.Query(0.99); g < min || g > max {
+		t.Errorf("perc99: want %v [%f,%f], got %v", w, min, max, g)
 	}
 }
@@ -51,7 +47,7 @@ func TestQuantRandMergeQuery(t *testing.T) {
 	rand.Seed(42)
 	a := make([]float64, 0, 1e6)
 	for i := 0; i < cap(a); i++ {
-		v := float64(rand.Int63())
+		v := rand.NormFloat64()
 		a = append(a, v)
 		ch <- v
 	}
@@ -63,20 +59,17 @@ func TestQuantRandMergeQuery(t *testing.T) {
 	t.Logf("len: %d", s.Count())
 	sort.Float64s(a)
-	w := getPerc(a, 0.50)
-	if g := s.Query(0.50); math.Abs(w-g)/w > 0.03 {
-		t.Errorf("perc50: want %v, got %v", w, g)
-		t.Logf("e: %f", math.Abs(w-g)/w)
+	w, min, max := getPerc(a, 0.50)
+	if g := s.Query(0.50); g < min || g > max {
+		t.Errorf("perc50: want %v [%f,%f], got %v", w, min, max, g)
 	}
-	w = getPerc(a, 0.90)
-	if g := s.Query(0.90); math.Abs(w-g)/w > 0.03 {
-		t.Errorf("perc90: want %v, got %v", w, g)
-		t.Logf("e: %f", math.Abs(w-g)/w)
+	w, min, max = getPerc(a, 0.90)
+	if g := s.Query(0.90); g < min || g > max {
+		t.Errorf("perc90: want %v [%f,%f], got %v", w, min, max, g)
 	}
-	w = getPerc(a, 0.99)
-	if g := s.Query(0.99); math.Abs(w-g)/w > 0.03 {
-		t.Errorf("perc99: want %v, got %v", w, g)
-		t.Logf("e: %f", math.Abs(w-g)/w)
+	w, min, max = getPerc(a, 0.99)
+	if g := s.Query(0.99); g < min || g > max {
+		t.Errorf("perc99: want %v [%f,%f], got %v", w, min, max, g)
 	}
 }
@@ -122,7 +115,15 @@ func TestDefaults(t *testing.T) {
 	}
 }
 
-func getPerc(x []float64, p float64) float64 {
+func getPerc(x []float64, p float64) (want, min, max float64) {
 	k := int(float64(len(x)) * p)
-	return x[k]
+	lower := int(float64(len(x)) * (p - 0.04))
+	if lower < 0 {
+		lower = 0
+	}
+	upper := int(float64(len(x))*(p+0.04)) + 1
+	if upper >= len(x) {
+		upper = len(x) - 1
+	}
+	return x[k], x[lower], x[upper]
 }
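For a concrete sense of the new check: getPerc now returns the target order statistic plus lower and upper bounds roughly 4 percentage points of rank on either side, and the tests accept any query result inside that window instead of requiring a 3% relative error, presumably because rand.NormFloat64 values are centered on zero, where a relative-error check is unstable. A small sketch reproducing the window arithmetic, illustration only; exact indices can shift by one with float64 truncation:

package main

import "fmt"

// window reproduces the rank bounds computed by the new getPerc above for
// n sorted samples and target quantile p.
func window(n int, p float64) (k, lower, upper int) {
	k = int(float64(n) * p)
	lower = int(float64(n) * (p - 0.04))
	if lower < 0 {
		lower = 0
	}
	upper = int(float64(n)*(p+0.04)) + 1
	if upper >= n {
		upper = n - 1
	}
	return
}

func main() {
	fmt.Println(window(100000, 0.50))  // roughly ranks 46000..54000 around rank 50000 (TestQuantRandQuery size)
	fmt.Println(window(1000000, 0.99)) // upper bound clamps to n-1 in the tail
}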