From c98db4eccd6e212585505fc1079d4a0472e04ff5 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 5 Mar 2020 20:07:45 +0100 Subject: [PATCH 01/25] Demo sparse histograms Printf the structure of it instead of actually encoding it. Signed-off-by: beorn7 --- examples/random/main.go | 7 +- prometheus/histogram.go | 222 +++++++++++++++++++++++++++++++++------- 2 files changed, 188 insertions(+), 41 deletions(-) diff --git a/examples/random/main.go b/examples/random/main.go index 20a9db5..9b910fa 100644 --- a/examples/random/main.go +++ b/examples/random/main.go @@ -54,9 +54,10 @@ var ( // normal distribution, with 20 buckets centered on the mean, each // half-sigma wide. rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "rpc_durations_histogram_seconds", - Help: "RPC latency distributions.", - Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + Name: "rpc_durations_histogram_seconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + SparseBucketsResolution: 20, }) ) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 4271f43..e7115e4 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -14,7 +14,9 @@ package prometheus import ( + "bytes" "fmt" + "io" "math" "runtime" "sort" @@ -58,12 +60,14 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. Most likely, however, you will be required to define buckets // customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) +// DefSparseBucketsZeroThreshold is the default value for +// SparseBucketsZeroThreshold in the HistogramOpts. +var DefSparseBucketsZeroThreshold = 1e-128 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, ) // LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest @@ -146,8 +150,32 @@ type HistogramOpts struct { // element in the slice is the upper inclusive bound of a bucket. The // values must be sorted in strictly increasing order. There is no need // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. + // implicitly. If Buckets is left as nil or set to a slice of length + // zero, it is replaced by default buckets. The default buckets are + // DefBuckets if no sparse buckets (see below) are used, otherwise the + // default is no buckets. (In other words, if you want to use both + // reguler buckets and sparse buckets, you have to define the regular + // buckets here explicitly.) Buckets []float64 + + // If SparseBucketsResolution is not zero, sparse buckets are used (in + // addition to the regular buckets, if defined above). Every power of + // ten is divided into the given number of exponential buckets. For + // example, if set to 3, the bucket boundaries are approximately […, + // 0.1, 0.215, 0.464, 1, 2.15, 4,64, 10, 21.5, 46.4, 100, …] Histograms + // can only be properly aggregated if they use the same + // resolution. 
Therefore, it is recommended to use 20 as a resolution, + // which is generally expected to be a good tradeoff between resource + // usage and accuracy (resulting in a maximum error of quantile values + // of about 6%). + SparseBucketsResolution uint8 + // All observations with an absolute value of less or equal + // SparseBucketsZeroThreshold are accumulated into a “zero” bucket. For + // best results, this should be close to a bucket boundary. This is + // moste easily accomplished by picking a power of ten. If + // SparseBucketsZeroThreshold is left at zero (or set to a negative + // value), DefSparseBucketsZeroThreshold is used as the threshold. + SparseBucketsZeroThreshold float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -184,16 +212,20 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + sparseResolution: opts.SparseBucketsResolution, + sparseThreshold: opts.SparseBucketsZeroThreshold, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, + } + if len(h.upperBounds) == 0 && opts.SparseBucketsResolution == 0 { + h.upperBounds = DefBuckets + } + if h.sparseThreshold <= 0 { + h.sparseThreshold = DefSparseBucketsZeroThreshold } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -228,6 +260,67 @@ type histogramCounts struct { sumBits uint64 count uint64 buckets []uint64 + // sparse buckets are implemented with a sync.Map for this PoC. A + // dedicated data structure will likely be more efficient. + // There are separate maps for negative and positive observations. + // The map's value is a *uint64, counting observations in that bucket. + // The map's key is the logarithmic index of the bucket. Index 0 is for an + // upper bound of 1. Each increment/decrement by SparseBucketsResolution + // multiplies/divides the upper bound by 10. Indices in between are + // spaced exponentially as defined in spareBounds. + sparseBucketsPositive, sparseBucketsNegative sync.Map + // sparseZeroBucket counts all (positive and negative) observations in + // the zero bucket (with an absolute value less or equal + // SparseBucketsZeroThreshold). + sparseZeroBucket uint64 +} + +// observe manages the parts of observe that only affects +// histogramCounts. doSparse is true if spare buckets should be done, +// too. whichSparse is 0 for the sparseZeroBucket and +1 or -1 for +// sparseBucketsPositive or sparseBucketsNegative, respectively. sparseKey is +// the key of the sparse bucket to use. 
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool, whichSparse int, sparseKey int) { + if bucket < len(hc.buckets) { + atomic.AddUint64(&hc.buckets[bucket], 1) + } + for { + oldBits := atomic.LoadUint64(&hc.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hc.sumBits, oldBits, newBits) { + break + } + } + if doSparse { + switch whichSparse { + case 0: + atomic.AddUint64(&hc.sparseZeroBucket, 1) + case +1: + addToSparseBucket(&hc.sparseBucketsPositive, sparseKey, 1) + case -1: + addToSparseBucket(&hc.sparseBucketsNegative, sparseKey, 1) + default: + panic(fmt.Errorf("invalid value for whichSparse: %d", whichSparse)) + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hc.count, 1) +} + +func addToSparseBucket(buckets *sync.Map, key int, increment uint64) { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. + atomic.AddUint64(existingBucket.(*uint64), increment) + return + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddUint64(actualBucket.(*uint64), increment) + } } type histogram struct { @@ -259,9 +352,11 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + sparseResolution uint8 + sparseThreshold float64 now func() time.Time // To mock out time.Now() for testing. } @@ -309,6 +404,9 @@ func (h *histogram) Write(out *dto.Metric) error { SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } + out.Histogram = his + out.Label = h.labelPairs + var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) @@ -329,11 +427,7 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } - - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. + // Add all the cold counts to the new hot counts and reset the cold counts. atomic.AddUint64(&hotCounts.count, count) atomic.StoreUint64(&coldCounts.count, 0) for { @@ -348,9 +442,64 @@ func (h *histogram) Write(out *dto.Metric) error { atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) atomic.StoreUint64(&coldCounts.buckets[i], 0) } + if h.sparseResolution != 0 { + zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) + + defer func() { + atomic.AddUint64(&hotCounts.sparseZeroBucket, zeroBucket) + atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) + coldCounts.sparseBucketsPositive.Range(addAndReset(&hotCounts.sparseBucketsPositive)) + coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative)) + }() + + var buf bytes.Buffer + // TODO(beorn7): encode zero bucket threshold and count. 
+ fmt.Println("Zero bucket:", zeroBucket) // DEBUG + fmt.Println("Positive buckets:") // DEBUG + if _, err := encodeSparseBuckets(&buf, &coldCounts.sparseBucketsPositive, zeroBucket); err != nil { + return err + } + fmt.Println("Negative buckets:") // DEBUG + if _, err := encodeSparseBuckets(&buf, &coldCounts.sparseBucketsNegative, zeroBucket); err != nil { + return err + } + } return nil } +func encodeSparseBuckets(w io.Writer, buckets *sync.Map, zeroBucket uint64) (n int, err error) { + // TODO(beorn7): Add actual encoding of spare buckets. + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + fmt.Println(len(ii), "buckets") + var prev uint64 + for _, i := range ii { + v, _ := buckets.Load(i) + current := atomic.LoadUint64(v.(*uint64)) + fmt.Printf("- %d: %d Δ=%d\n", i, current, int(current)-int(prev)) + prev = current + } + return 0, nil +} + +// addAndReset returns a function to be used with sync.Map.Range of spare +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). +func addAndReset(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*uint64) + addToSparseBucket(hotBuckets, k.(int), atomic.LoadUint64(bucket)) + atomic.StoreUint64(bucket, 0) + return true + } +} + // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { @@ -368,25 +517,22 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { + doSparse := h.sparseResolution != 0 + var whichSparse, sparseKey int + if doSparse { + switch { + case v > h.sparseThreshold: + whichSparse = +1 + case v < -h.sparseThreshold: + whichSparse = -1 + } + sparseKey = int(math.Ceil(math.Log10(math.Abs(v)) * float64(h.sparseResolution))) + } // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] - - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) - } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) + h.counts[n>>63].observe(v, bucket, doSparse, whichSparse, sparseKey) } // updateExemplar replaces the exemplar for the provided bucket. 
With empty From abe540f8c095c8d94c6fc836b5aac37184e21c11 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 7 Apr 2020 23:18:40 +0200 Subject: [PATCH 02/25] Encode sparse histograms in protobuf Signed-off-by: beorn7 --- go.mod | 2 +- go.sum | 2 ++ prometheus/histogram.go | 61 ++++++++++++++++++++++------------------- 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 9d5bd9d..1538ae1 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/google/go-cmp v0.4.0 // indirect github.com/json-iterator/go v1.1.9 github.com/kr/pretty v0.1.0 // indirect - github.com/prometheus/client_model v0.2.0 + github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4 github.com/prometheus/common v0.9.1 github.com/prometheus/procfs v0.0.8 github.com/stretchr/testify v1.4.0 // indirect diff --git a/go.sum b/go.sum index e805d46..0ac79d9 100644 --- a/go.sum +++ b/go.sum @@ -73,6 +73,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4 h1:7Ws+6l4/5eJPHAxe0Axwo4XJwSAA4i0ipEjuoLXWFyo= +github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= diff --git a/prometheus/histogram.go b/prometheus/histogram.go index e7115e4..56dd2bf 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -14,9 +14,7 @@ package prometheus import ( - "bytes" "fmt" - "io" "math" "runtime" "sort" @@ -215,7 +213,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h := &histogram{ desc: desc, upperBounds: opts.Buckets, - sparseResolution: opts.SparseBucketsResolution, + sparseResolution: uint32(opts.SparseBucketsResolution), sparseThreshold: opts.SparseBucketsZeroThreshold, labelPairs: makeLabelPairs(desc, labelValues), counts: [2]*histogramCounts{{}, {}}, @@ -355,7 +353,7 @@ type histogram struct { upperBounds []float64 labelPairs []*dto.LabelPair exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - sparseResolution uint8 + sparseResolution uint32 // Instead of uint8 to be ready for protobuf encoding. sparseThreshold float64 now func() time.Time // To mock out time.Now() for testing. 
@@ -400,9 +398,11 @@ func (h *histogram) Write(out *dto.Metric) error { } his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SbResolution: &h.sparseResolution, + SbZeroThreshold: &h.sparseThreshold, } out.Histogram = his out.Label = h.labelPairs @@ -452,38 +452,43 @@ func (h *histogram) Write(out *dto.Metric) error { coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative)) }() - var buf bytes.Buffer - // TODO(beorn7): encode zero bucket threshold and count. - fmt.Println("Zero bucket:", zeroBucket) // DEBUG - fmt.Println("Positive buckets:") // DEBUG - if _, err := encodeSparseBuckets(&buf, &coldCounts.sparseBucketsPositive, zeroBucket); err != nil { - return err - } - fmt.Println("Negative buckets:") // DEBUG - if _, err := encodeSparseBuckets(&buf, &coldCounts.sparseBucketsNegative, zeroBucket); err != nil { - return err - } + his.SbZeroCount = proto.Uint64(zeroBucket) + his.SbNegative = makeSparseBuckets(&coldCounts.sparseBucketsNegative) + his.SbPositive = makeSparseBuckets(&coldCounts.sparseBucketsPositive) } return nil } -func encodeSparseBuckets(w io.Writer, buckets *sync.Map, zeroBucket uint64) (n int, err error) { - // TODO(beorn7): Add actual encoding of spare buckets. +func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { var ii []int buckets.Range(func(k, v interface{}) bool { ii = append(ii, k.(int)) return true }) sort.Ints(ii) - fmt.Println(len(ii), "buckets") - var prev uint64 - for _, i := range ii { - v, _ := buckets.Load(i) - current := atomic.LoadUint64(v.(*uint64)) - fmt.Printf("- %d: %d Δ=%d\n", i, current, int(current)-int(prev)) - prev = current + + if len(ii) == 0 { + return nil } - return 0, nil + + sbs := dto.SparseBuckets{} + var prevCount uint64 + var prevI int + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadUint64(v.(*uint64)) + if n == 0 || i-prevI != 1 { + sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ + Offset: proto.Int(i - prevI), + Length: proto.Uint32(1), + }) + } else { + *sbs.Span[len(sbs.Span)-1].Length++ + } + sbs.Delta = append(sbs.Delta, int64(count)-int64(prevCount)) // TODO(beorn7): Do proper overflow handling. 
+ prevI, prevCount = i, count + } + return &sbs } // addAndReset returns a function to be used with sync.Map.Range of spare From d1f5366b5282dfdc6fb48885e9a40f52e4e8df32 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Mon, 13 Apr 2020 15:50:40 +0200 Subject: [PATCH 03/25] Fix span offset Signed-off-by: beorn7 --- prometheus/histogram.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 56dd2bf..e7aed2b 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -473,20 +473,20 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { sbs := dto.SparseBuckets{} var prevCount uint64 - var prevI int + var nextI int for n, i := range ii { v, _ := buckets.Load(i) count := atomic.LoadUint64(v.(*uint64)) - if n == 0 || i-prevI != 1 { + if n == 0 || i-nextI != 0 { sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ - Offset: proto.Int(i - prevI), + Offset: proto.Int(i - nextI), Length: proto.Uint32(1), }) } else { *sbs.Span[len(sbs.Span)-1].Length++ } sbs.Delta = append(sbs.Delta, int64(count)-int64(prevCount)) // TODO(beorn7): Do proper overflow handling. - prevI, prevCount = i, count + nextI, prevCount = i+1, count } return &sbs } From a9d0066408f653a4cd88a4303bbfad976fe20f1d Mon Sep 17 00:00:00 2001 From: beorn7 Date: Mon, 13 Apr 2020 16:43:23 +0200 Subject: [PATCH 04/25] Add note about pow-of-10 precision issue Signed-off-by: beorn7 --- prometheus/histogram.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index e7aed2b..d73b28e 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -531,6 +531,11 @@ func (h *histogram) observe(v float64, bucket int) { case v < -h.sparseThreshold: whichSparse = -1 } + // TODO(beorn7): This sometimes gives inaccurate results for + // floats that are actual powers of 10, e.g. math.Log10(0.1) is + // calculated as -0.9999999999999999 rather than -1 and thus + // yields a key unexpectedly one off. Maybe special-case precise + // powers of 10. sparseKey = int(math.Ceil(math.Log10(math.Abs(v)) * float64(h.sparseResolution))) } // We increment h.countAndHotIdx so that the counter in the lower From 08104a0ef98e85cd6090eb035d63504ad9ae0ef9 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Fri, 29 Jan 2021 22:24:27 +0100 Subject: [PATCH 05/25] Minor doc comment fixes Signed-off-by: beorn7 --- prometheus/histogram.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index d73b28e..c199c5d 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -170,7 +170,7 @@ type HistogramOpts struct { // All observations with an absolute value of less or equal // SparseBucketsZeroThreshold are accumulated into a “zero” bucket. For // best results, this should be close to a bucket boundary. This is - // moste easily accomplished by picking a power of ten. If + // most easily accomplished by picking a power of ten. If // SparseBucketsZeroThreshold is left at zero (or set to a negative // value), DefSparseBucketsZeroThreshold is used as the threshold. SparseBucketsZeroThreshold float64 @@ -536,6 +536,7 @@ func (h *histogram) observe(v float64, bucket int) { // calculated as -0.9999999999999999 rather than -1 and thus // yields a key unexpectedly one off. Maybe special-case precise // powers of 10. + // TODO(beorn7): This needs special-casing for ±Inf and NaN. 
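+ // For example, with a resolution of 20, an observation of 1 yields
+ // key 0 (the bucket with an upper bound of 1), and an observation of
+ // 2 yields key 7 (upper bound 10^(7/20) ≈ 2.24).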
sparseKey = int(math.Ceil(math.Log10(math.Abs(v)) * float64(h.sparseResolution))) } // We increment h.countAndHotIdx so that the counter in the lower From a9df0bac899b0bb4ab283adb3ef16aa5bf632f67 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Fri, 30 Apr 2021 21:45:23 +0200 Subject: [PATCH 06/25] Update prometheus/client_model (now using sparsehistogram branch) Signed-off-by: beorn7 --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index f2d97fc..93f3d82 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/golang/protobuf v1.4.3 github.com/json-iterator/go v1.1.10 - github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4 + github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6 github.com/prometheus/common v0.18.0 github.com/prometheus/procfs v0.6.0 golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 diff --git a/go.sum b/go.sum index 1a9bb21..30057e2 100644 --- a/go.sum +++ b/go.sum @@ -223,6 +223,8 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4 h1:7Ws+6l4/5eJPHAxe0Axwo4XJwSAA4i0ipEjuoLXWFyo= github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6 h1:wlZYx9ITBsvMO/wVoi30A36fAdRlBC130JksGGfaYl8= +github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= From b7a540a1b21047d277b4105f13a8710201d3f2a0 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Mon, 3 May 2021 16:09:28 +0200 Subject: [PATCH 07/25] Fix test Signed-off-by: beorn7 --- prometheus/examples_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/prometheus/examples_test.go b/prometheus/examples_test.go index a73ed18..f97a3e2 100644 --- a/prometheus/examples_test.go +++ b/prometheus/examples_test.go @@ -538,6 +538,8 @@ func ExampleHistogram() { // cumulative_count: 816 // upper_bound: 40 // > + // sb_resolution: 0 + // sb_zero_threshold: 1e-128 // > } From 553ed73917a06a4eaeae55693cdb866d7d2fc8b2 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Mon, 3 May 2021 16:58:07 +0200 Subject: [PATCH 08/25] Fix lint warning Signed-off-by: beorn7 --- prometheus/histogram.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 63a3d3b..36cf415 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -480,7 +480,7 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { count := atomic.LoadUint64(v.(*uint64)) if n == 0 || i-nextI != 0 { sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ - Offset: proto.Int(i - nextI), + Offset: proto.Int32(int32(i - nextI)), Length: proto.Uint32(1), }) } else { From 97eb0411ac9e62849da7a0e18e2d278adc9157b8 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Mon, 3 May 2021 18:08:16 +0200 Subject: [PATCH 09/25] Tidy go.sum Signed-off-by: beorn7 --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum 
b/go.sum index 30057e2..94a0ac1 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,6 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4 h1:7Ws+6l4/5eJPHAxe0Axwo4XJwSAA4i0ipEjuoLXWFyo= -github.com/prometheus/client_model v0.2.1-0.20200406191659-4b803f3550a4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6 h1:wlZYx9ITBsvMO/wVoi30A36fAdRlBC130JksGGfaYl8= github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= From 31318b7523c84578af6c98fab32679c804586096 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Sat, 12 Jun 2021 00:58:46 +0200 Subject: [PATCH 10/25] Switch to base-2 buckets This seem what OTel is converging towards, see https://github.com/open-telemetry/oteps/pull/149 . I see pros and cons with base-10 vs base-2. They are discussed in detail in that OTel PR, and the gist of the discussion is pretty much in line with my design doc. Since the balance is easy to tip here, I think we should go with base-2 if OTel picks base-2. This also seems to be in agreement with several proprietary solution (see again the discussion on that OTel PR.) The idea to make the number of buckets per power of 2 (or formerly 10) a power of 2 itself was also sketched out in the design doc already. It guarantees mergeability of different resolutions. I was undecided between making it a recommendation or mandatory. Now I think it should be mandatory as it has the additional benefit of playing well with OTel's plans. This commit also addresses a number of outstanding TODOs. Signed-off-by: beorn7 --- examples/random/main.go | 8 +- go.mod | 2 +- go.sum | 5 +- prometheus/examples_test.go | 4 +- prometheus/histogram.go | 361 ++++++++++++++++++++++++++++++------ 5 files changed, 312 insertions(+), 68 deletions(-) diff --git a/examples/random/main.go b/examples/random/main.go index 9b910fa..334fd8a 100644 --- a/examples/random/main.go +++ b/examples/random/main.go @@ -54,10 +54,10 @@ var ( // normal distribution, with 20 buckets centered on the mean, each // half-sigma wide. 
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "rpc_durations_histogram_seconds", - Help: "RPC latency distributions.", - Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), - SparseBucketsResolution: 20, + Name: "rpc_durations_histogram_seconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + SparseBucketsFactor: 1.1, }) ) diff --git a/go.mod b/go.mod index 82fca34..813ae76 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/golang/protobuf v1.4.3 github.com/json-iterator/go v1.1.11 - github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6 + github.com/prometheus/client_model v0.2.1-0.20210611125623-bbaf1cc17b15 github.com/prometheus/common v0.26.0 github.com/prometheus/procfs v0.6.0 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 diff --git a/go.sum b/go.sum index 9bd0134..fc2a056 100644 --- a/go.sum +++ b/go.sum @@ -24,6 +24,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -77,8 +78,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6 h1:wlZYx9ITBsvMO/wVoi30A36fAdRlBC130JksGGfaYl8= -github.com/prometheus/client_model v0.2.1-0.20210403151606-24db95a3d5d6/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210611125623-bbaf1cc17b15 h1:l+7cw41KLeOScRk7f9Tg//xT8LAz55Kg+Fg9i0i0Cyw= +github.com/prometheus/client_model v0.2.1-0.20210611125623-bbaf1cc17b15/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= diff --git a/prometheus/examples_test.go b/prometheus/examples_test.go index f97a3e2..bdcdfb4 100644 --- a/prometheus/examples_test.go +++ b/prometheus/examples_test.go @@ -538,8 +538,8 @@ func ExampleHistogram() { // cumulative_count: 816 // upper_bound: 40 // > - // sb_resolution: 0 - // sb_zero_threshold: 1e-128 + // sb_schema: 0 + // sb_zero_threshold: 0 // > } diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 1c11128..a0e4b4e 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -28,6 
+28,176 @@ import ( dto "github.com/prometheus/client_model/go" ) +// sparseBounds for the frac of observed values. Only relevant for schema > 0. +// Position in the slice is the schema. (0 is never used, just here for +// convenience of using the schema directly as the index.) +var sparseBounds = [][]float64{ + // Schema "0": + []float64{0.5}, + // Schema 1: + []float64{0.5, 0.7071067811865475}, + // Schema 2: + []float64{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + []float64{0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711}, + // Schema 4: + []float64{0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735}, + // Schema 5: + []float64{0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999}, + // Schema 6: + []float64{0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752}, + // Schema 7: + []float64{0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 
0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328}, + // Schema 8: + []float64{0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 
0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 
0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698}, +} + +// The sparseBounds above can be generated with the code below. +// TODO(beorn7): Actually do it via go generate. +// +// var sparseBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate sparseBounds. +// numBuckets := 1 +// for i := range sparseBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = sparseBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// sparseBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in // configurable buckets. Similar to a summary, it also provides a sum of // observations and an observation count. @@ -68,7 +238,10 @@ var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} // DefSparseBucketsZeroThreshold is the default value for // SparseBucketsZeroThreshold in the HistogramOpts. -var DefSparseBucketsZeroThreshold = 1e-128 +const DefSparseBucketsZeroThreshold = 2.938735877055719e-39 + +// This is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), which +// is a bucket boundary at all possible resolutions. var errBucketLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in histograms", bucketLabel, @@ -162,24 +335,41 @@ type HistogramOpts struct { // buckets here explicitly.) Buckets []float64 - // If SparseBucketsResolution is not zero, sparse buckets are used (in - // addition to the regular buckets, if defined above). Every power of - // ten is divided into the given number of exponential buckets. For - // example, if set to 3, the bucket boundaries are approximately […, - // 0.1, 0.215, 0.464, 1, 2.15, 4,64, 10, 21.5, 46.4, 100, …] Histograms - // can only be properly aggregated if they use the same - // resolution. Therefore, it is recommended to use 20 as a resolution, - // which is generally expected to be a good tradeoff between resource - // usage and accuracy (resulting in a maximum error of quantile values - // of about 6%). - SparseBucketsResolution uint8 + // If SparseBucketsFactor is greater than one, sparse buckets are used + // (in addition to the regular buckets, if defined above). Sparse + // buckets are exponential buckets covering the whole float64 range + // (with the exception of the “zero” bucket, see + // SparseBucketsZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant factor. + // SparseBucketsFactor provides an upper bound for this factor + // (exception see below). The smaller SparseBucketsFactor, the more + // buckets will be used and thus the more costly the histogram will + // become. A generally good trade-off between cost and accuracy is a + // value of 1.1 (each bucket is at most 10% wider than the previous + // one), which will result in each power of two divided into 8 buckets + // (e.g. there will be 8 buckets between 1 and 2, same as between 2 and + // 4, and 4 and 8, etc.). + // + // Details about the actually used factor: The factor is calculated as + // 2^(2^n), where n is an integer number between (and including) -8 and + // 4. 
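+ // (For example, with SparseBucketsFactor set to 1.1, the actually
+ // used factor is 2^(2^-3) ≈ 1.0905, resulting in 8 buckets per power
+ // of two.)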
n is chosen so that the resulting factor is the largest that is + // still smaller or equal to SparseBucketsFactor. Note that the smallest + // possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) ). If + // SparseBucketsFactor is greater than 1 but smaller than 2^(2^-8), then + // the actually used factor is still 2^(2^-8) even though it is larger + // than the provided SparseBucketsFactor. + SparseBucketsFactor float64 // All observations with an absolute value of less or equal // SparseBucketsZeroThreshold are accumulated into a “zero” bucket. For // best results, this should be close to a bucket boundary. This is - // most easily accomplished by picking a power of ten. If + // usually the case if picking a power of two. If // SparseBucketsZeroThreshold is left at zero (or set to a negative // value), DefSparseBucketsZeroThreshold is used as the threshold. SparseBucketsZeroThreshold float64 + // TODO(beorn7): Need a setting to limit total bucket count and to + // configure a strategy to enforce the limit, e.g. if minimum duration + // after last reset, reset. If not, half the resolution and/or expand + // the zero bucket. } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -217,20 +407,24 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - sparseResolution: uint32(opts.SparseBucketsResolution), - sparseThreshold: opts.SparseBucketsZeroThreshold, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + sparseThreshold: opts.SparseBucketsZeroThreshold, + labelPairs: MakeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, } - if len(h.upperBounds) == 0 && opts.SparseBucketsResolution == 0 { + if len(h.upperBounds) == 0 && opts.SparseBucketsFactor <= 1 { h.upperBounds = DefBuckets } if h.sparseThreshold <= 0 { h.sparseThreshold = DefSparseBucketsZeroThreshold } + if opts.SparseBucketsFactor <= 1 { + h.sparseThreshold = 0 // To mark that there are no sparse buckets. + } else { + h.sparseSchema = pickSparseSchema(opts.SparseBucketsFactor) + } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { if upperBound >= h.upperBounds[i+1] { @@ -264,14 +458,14 @@ type histogramCounts struct { sumBits uint64 count uint64 buckets []uint64 - // sparse buckets are implemented with a sync.Map for this PoC. A - // dedicated data structure will likely be more efficient. - // There are separate maps for negative and positive observations. - // The map's value is a *uint64, counting observations in that bucket. - // The map's key is the logarithmic index of the bucket. Index 0 is for an - // upper bound of 1. Each increment/decrement by SparseBucketsResolution - // multiplies/divides the upper bound by 10. Indices in between are - // spaced exponentially as defined in spareBounds. + // sparse buckets are implemented with a sync.Map for now. A dedicated + // data structure will likely be more efficient. There are separate maps + // for negative and positive observations. The map's value is an *int64, + // counting observations in that bucket. (Note that we don't use uint64 + // as an int64 won't overflow in practice, and working with signed + // numbers from the beginning simplifies the handling of deltas.) The + // map's key is the index of the bucket according to the used + // sparseSchema. Index 0 is for an upper bound of 1. 
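+ // For a schema n > 0, the bucket with index i has an upper bound of
+ // 2^(i/2^n), e.g. with schema 2, index 1 covers the range (1, 2^(1/4)].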
sparseBucketsPositive, sparseBucketsNegative sync.Map // sparseZeroBucket counts all (positive and negative) observations in // the zero bucket (with an absolute value less or equal @@ -312,10 +506,10 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool, whichSp atomic.AddUint64(&hc.count, 1) } -func addToSparseBucket(buckets *sync.Map, key int, increment uint64) { +func addToSparseBucket(buckets *sync.Map, key int, increment int64) { if existingBucket, ok := buckets.Load(key); ok { // Fast path without allocation. - atomic.AddUint64(existingBucket.(*uint64), increment) + atomic.AddInt64(existingBucket.(*int64), increment) return } // Bucket doesn't exist yet. Slow path allocating new counter. @@ -323,7 +517,7 @@ func addToSparseBucket(buckets *sync.Map, key int, increment uint64) { if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { // The bucket was created concurrently in another goroutine. // Have to increment after all. - atomic.AddUint64(actualBucket.(*uint64), increment) + atomic.AddInt64(actualBucket.(*int64), increment) } } @@ -339,7 +533,7 @@ type histogram struct { // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must + // last observation on the now cool one has completed. All cold fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: @@ -356,11 +550,11 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - sparseResolution uint32 // Instead of uint8 to be ready for protobuf encoding. - sparseThreshold float64 + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + sparseSchema int32 + sparseThreshold float64 // This is zero iff no sparse buckets are used. now func() time.Time // To mock out time.Now() for testing. 
} @@ -407,7 +601,7 @@ func (h *histogram) Write(out *dto.Metric) error { Bucket: make([]*dto.Bucket, len(h.upperBounds)), SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - SbResolution: &h.sparseResolution, + SbSchema: &h.sparseSchema, SbZeroThreshold: &h.sparseThreshold, } out.Histogram = his @@ -448,7 +642,7 @@ func (h *histogram) Write(out *dto.Metric) error { atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) atomic.StoreUint64(&coldCounts.buckets[i], 0) } - if h.sparseResolution != 0 { + if h.sparseThreshold != 0 { zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) defer func() { @@ -478,21 +672,41 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { } sbs := dto.SparseBuckets{} - var prevCount uint64 + var prevCount int64 var nextI int + + appendDelta := func(count int64) { + *sbs.Span[len(sbs.Span)-1].Length++ + sbs.Delta = append(sbs.Delta, count-prevCount) + prevCount = count + } + for n, i := range ii { v, _ := buckets.Load(i) - count := atomic.LoadUint64(v.(*uint64)) - if n == 0 || i-nextI != 0 { + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one ore two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ - Offset: proto.Int32(int32(i - nextI)), - Length: proto.Uint32(1), + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), }) } else { - *sbs.Span[len(sbs.Span)-1].Length++ + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } } - sbs.Delta = append(sbs.Delta, int64(count)-int64(prevCount)) // TODO(beorn7): Do proper overflow handling. - nextI, prevCount = i+1, count + appendDelta(count) + nextI = i + 1 } return &sbs } @@ -504,9 +718,9 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { // recreated on the next scrape). func addAndReset(hotBuckets *sync.Map) func(k, v interface{}) bool { return func(k, v interface{}) bool { - bucket := v.(*uint64) - addToSparseBucket(hotBuckets, k.(int), atomic.LoadUint64(bucket)) - atomic.StoreUint64(bucket, 0) + bucket := v.(*int64) + addToSparseBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) + atomic.StoreInt64(bucket, 0) return true } } @@ -528,7 +742,8 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { - doSparse := h.sparseResolution != 0 + // Do not add to sparse buckets for NaN observations. + doSparse := h.sparseThreshold != 0 && !math.IsNaN(v) var whichSparse, sparseKey int if doSparse { switch { @@ -537,13 +752,20 @@ func (h *histogram) observe(v float64, bucket int) { case v < -h.sparseThreshold: whichSparse = -1 } - // TODO(beorn7): This sometimes gives inaccurate results for - // floats that are actual powers of 10, e.g. math.Log10(0.1) is - // calculated as -0.9999999999999999 rather than -1 and thus - // yields a key unexpectedly one off. Maybe special-case precise - // powers of 10. 
- // TODO(beorn7): This needs special-casing for ±Inf and NaN. - sparseKey = int(math.Ceil(math.Log10(math.Abs(v)) * float64(h.sparseResolution))) + frac, exp := math.Frexp(math.Abs(v)) + switch { + case math.IsInf(v, 0): + sparseKey = math.MaxInt32 // Largest possible sparseKey. + case h.sparseSchema > 0: + bounds := sparseBounds[h.sparseSchema] + sparseKey = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + default: + sparseKey = exp + if frac == 0.5 { + sparseKey-- + } + sparseKey /= 1 << -h.sparseSchema + } } // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value @@ -797,3 +1019,24 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSparseschema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. +func pickSparseSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) + } + floor := math.Floor(math.Log2(math.Log2(bucketFactor))) + switch { + case floor <= -8: + return 8 + case floor >= 4: + return -4 + default: + return -int32(floor) + } +} From 6c4e0ef7407f9781d4e8aa0cb4a6c02f3c535caa Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 23 Jun 2021 21:56:26 +0200 Subject: [PATCH 11/25] Add tests for sparse histogram Signed-off-by: beorn7 --- prometheus/histogram.go | 6 +-- prometheus/histogram_test.go | 90 ++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+), 3 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index a0e4b4e..1e3c4e1 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -238,11 +238,11 @@ var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} // DefSparseBucketsZeroThreshold is the default value for // SparseBucketsZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. const DefSparseBucketsZeroThreshold = 2.938735877055719e-39 -// This is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), which -// is a bucket boundary at all possible resolutions. - var errBucketLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in histograms", bucketLabel, ) diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index 3514e81..61d8047 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -456,3 +456,93 @@ func TestHistogramExemplar(t *testing.T) { } } } + +func TestSparseHistogram(t *testing.T) { + + scenarios := []struct { + name string + observations []float64 + factor float64 + zeroThreshold float64 + want string // String representation of protobuf. + }{ + { + name: "no sparse buckets", + observations: []float64{1, 2, 3}, + factor: 1, + want: `sample_count:3 sample_sum:6 bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: sb_schema:0 sb_zero_threshold:0 `, // Has conventional buckets because there are no sparse buckets. 
+ }, + { + name: "factor 1.1 results in schema 3", + observations: []float64{0, 1, 2, 3}, + factor: 1.1, + want: `sample_count:4 sample_sum:6 sb_schema:3 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: span: span: delta:1 delta:0 delta:0 > `, + }, + { + name: "factor 1.2 results in schema 2", + observations: []float64{0, 1, 1.2, 1.4, 1.8, 2}, + factor: 1.2, + want: `sample_count:6 sample_sum:7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + { + name: "negative buckets", + observations: []float64{0, -1, -1.2, -1.4, -1.8, -2}, + factor: 1.2, + want: `sample_count:6 sample_sum:-7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + { + name: "negative and positive buckets", + observations: []float64{0, -1, -1.2, -1.4, -1.8, -2, 1, 1.2, 1.4, 1.8, 2}, + factor: 1.2, + want: `sample_count:11 sample_sum:0 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:-1 delta:2 delta:-2 delta:2 > sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + { + name: "wide zero bucket", + observations: []float64{0, -1, -1.2, -1.4, -1.8, -2, 1, 1.2, 1.4, 1.8, 2}, + factor: 1.2, + zeroThreshold: 1.4, + want: `sample_count:11 sample_sum:0 sb_schema:2 sb_zero_threshold:1.4 sb_zero_count:7 sb_negative: delta:2 > sb_positive: delta:2 > `, + }, + { + name: "NaN observation", + observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.NaN()}, + factor: 1.2, + want: `sample_count:7 sample_sum:nan sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + { + name: "+Inf observation", + observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.Inf(+1)}, + factor: 1.2, + want: `sample_count:7 sample_sum:inf sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: span: delta:1 delta:-1 delta:2 delta:-2 delta:2 delta:-1 > `, + }, + { + name: "-Inf observation", + observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.Inf(-1)}, + factor: 1.2, + want: `sample_count:7 sample_sum:-inf sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 > sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + } + + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + his := NewHistogram(HistogramOpts{ + Name: "name", + Help: "help", + SparseBucketsFactor: s.factor, + SparseBucketsZeroThreshold: s.zeroThreshold, + }) + for _, o := range s.observations { + his.Observe(o) + } + m := &dto.Metric{} + if err := his.Write(m); err != nil { + t.Fatal("unexpected error writing metric", err) + } + got := m.Histogram.String() + if s.want != got { + t.Errorf("want histogram %q, got %q", s.want, got) + } + }) + } + +} From 514234486bc1628c19a886d2d97bdb4c40d35292 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 24 Jun 2021 22:12:46 +0200 Subject: [PATCH 12/25] Pin client_model to the most recent sparsehistogram commit Signed-off-by: beorn7 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 813ae76..d702e78 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/golang/protobuf v1.4.3 github.com/json-iterator/go v1.1.11 - github.com/prometheus/client_model v0.2.1-0.20210611125623-bbaf1cc17b15 + github.com/prometheus/client_model 
v0.2.1-0.20210624201024-61b6c1aac064 github.com/prometheus/common v0.26.0 github.com/prometheus/procfs v0.6.0 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 diff --git a/go.sum b/go.sum index fc2a056..6b7efe9 100644 --- a/go.sum +++ b/go.sum @@ -78,8 +78,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.1-0.20210611125623-bbaf1cc17b15 h1:l+7cw41KLeOScRk7f9Tg//xT8LAz55Kg+Fg9i0i0Cyw= -github.com/prometheus/client_model v0.2.1-0.20210611125623-bbaf1cc17b15/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.2.1-0.20210624201024-61b6c1aac064 h1:Kyx21CLOfWDA4e2TcOcupRl2g/Bmddu0AL0hR1BldEw= +github.com/prometheus/client_model v0.2.1-0.20210624201024-61b6c1aac064/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= From aa6f67a9e64b3a02ca1b785be05bede58180cf00 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 29 Jun 2021 14:52:37 +0200 Subject: [PATCH 13/25] Add TODO about bucket search optimization Signed-off-by: beorn7 --- prometheus/histogram.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 1e3c4e1..7dda036 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -31,6 +31,12 @@ import ( // sparseBounds for the frac of observed values. Only relevant for schema > 0. // Position in the slice is the schema. (0 is never used, just here for // convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 var sparseBounds = [][]float64{ // Schema "0": []float64{0.5}, From 9ef5f90a767e6e4b42ffb2583014e07958519290 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 20 Jul 2021 19:01:13 +0200 Subject: [PATCH 14/25] Allow a zero threshold of zero Signed-off-by: beorn7 --- prometheus/examples_test.go | 2 -- prometheus/histogram.go | 49 ++++++++++++++++++++---------------- prometheus/histogram_test.go | 2 +- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/prometheus/examples_test.go b/prometheus/examples_test.go index bdcdfb4..a73ed18 100644 --- a/prometheus/examples_test.go +++ b/prometheus/examples_test.go @@ -538,8 +538,6 @@ func ExampleHistogram() { // cumulative_count: 816 // upper_bound: 40 // > - // sb_schema: 0 - // sb_zero_threshold: 0 // > } diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 7dda036..37d3bb0 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -369,8 +369,13 @@ type HistogramOpts struct { // SparseBucketsZeroThreshold are accumulated into a “zero” bucket. 
For // best results, this should be close to a bucket boundary. This is // usually the case if picking a power of two. If - // SparseBucketsZeroThreshold is left at zero (or set to a negative - // value), DefSparseBucketsZeroThreshold is used as the threshold. + // SparseBucketsZeroThreshold is left at zero, + // DefSparseBucketsZeroThreshold is used as the threshold. If it is set + // to a negative value, a threshold of zero is used, i.e. only + // observations of precisely zero will go into the zero + // bucket. (TODO(beorn7): That's obviously weird and just a consequence + // of making the zero value of HistogramOpts meaningful. Has to be + // solved more elegantly in the final version.) SparseBucketsZeroThreshold float64 // TODO(beorn7): Need a setting to limit total bucket count and to // configure a strategy to enforce the limit, e.g. if minimum duration @@ -413,22 +418,24 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - sparseThreshold: opts.SparseBucketsZeroThreshold, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{{}, {}}, + now: time.Now, } if len(h.upperBounds) == 0 && opts.SparseBucketsFactor <= 1 { h.upperBounds = DefBuckets } - if h.sparseThreshold <= 0 { - h.sparseThreshold = DefSparseBucketsZeroThreshold - } if opts.SparseBucketsFactor <= 1 { - h.sparseThreshold = 0 // To mark that there are no sparse buckets. + h.sparseSchema = math.MinInt32 // To mark that there are no sparse buckets. } else { + switch { + case opts.SparseBucketsZeroThreshold > 0: + h.sparseThreshold = opts.SparseBucketsZeroThreshold + case opts.SparseBucketsZeroThreshold == 0: + h.sparseThreshold = DefSparseBucketsZeroThreshold + } // Leave h.sparseThreshold at 0 otherwise. h.sparseSchema = pickSparseSchema(opts.SparseBucketsFactor) } for i, upperBound := range h.upperBounds { @@ -559,8 +566,8 @@ type histogram struct { upperBounds []float64 labelPairs []*dto.LabelPair exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - sparseSchema int32 - sparseThreshold float64 // This is zero iff no sparse buckets are used. + sparseSchema int32 // Set to math.MinInt32 if no sparse buckets are used. + sparseThreshold float64 now func() time.Time // To mock out time.Now() for testing. 
} @@ -604,11 +611,9 @@ func (h *histogram) Write(out *dto.Metric) error { } his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - SbSchema: &h.sparseSchema, - SbZeroThreshold: &h.sparseThreshold, + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } out.Histogram = his out.Label = h.labelPairs @@ -648,7 +653,9 @@ func (h *histogram) Write(out *dto.Metric) error { atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) atomic.StoreUint64(&coldCounts.buckets[i], 0) } - if h.sparseThreshold != 0 { + if h.sparseSchema > math.MinInt32 { + his.SbZeroThreshold = &h.sparseThreshold + his.SbSchema = &h.sparseSchema zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) defer func() { @@ -749,7 +756,7 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { // Do not add to sparse buckets for NaN observations. - doSparse := h.sparseThreshold != 0 && !math.IsNaN(v) + doSparse := h.sparseSchema > math.MinInt32 && !math.IsNaN(v) var whichSparse, sparseKey int if doSparse { switch { diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index 61d8047..08e5e9e 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -470,7 +470,7 @@ func TestSparseHistogram(t *testing.T) { name: "no sparse buckets", observations: []float64{1, 2, 3}, factor: 1, - want: `sample_count:3 sample_sum:6 bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: sb_schema:0 sb_zero_threshold:0 `, // Has conventional buckets because there are no sparse buckets. + want: `sample_count:3 sample_sum:6 bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: bucket: `, // Has conventional buckets because there are no sparse buckets. 
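
The updated expectation above reflects that sb_schema and sb_zero_threshold are now only emitted when sparse buckets are actually in use. How newHistogram derives those two values from the options can be summarized in a small sketch (resolveSparse is illustrative only and assumes it sits in the prometheus package so it can reuse pickSparseSchema and DefSparseBucketsZeroThreshold):

    package prometheus

    import "math"

    // resolveSparse is an illustrative helper, not part of the library, showing
    // how newHistogram above derives the effective schema and zero threshold.
    func resolveSparse(opts HistogramOpts) (schema int32, zeroThreshold float64) {
        if opts.SparseBucketsFactor <= 1 {
            return math.MinInt32, 0 // Marker value: no sparse buckets at all.
        }
        switch {
        case opts.SparseBucketsZeroThreshold > 0:
            zeroThreshold = opts.SparseBucketsZeroThreshold
        case opts.SparseBucketsZeroThreshold == 0:
            zeroThreshold = DefSparseBucketsZeroThreshold // 2^-128
        default:
            // Negative configured value: the threshold stays 0, so only
            // observations of exactly zero land in the zero bucket.
        }
        return pickSparseSchema(opts.SparseBucketsFactor), zeroThreshold
    }
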
}, { name: "factor 1.1 results in schema 3", From 24099603bc44b75a6946744e1d468ff59d1c0ac6 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 18 Aug 2021 19:04:29 +0200 Subject: [PATCH 15/25] Implement strategy to limit the sparse bucket count Signed-off-by: beorn7 --- prometheus/histogram.go | 624 ++++++++++++++++++++++++++--------- prometheus/histogram_test.go | 244 ++++++++++++-- 2 files changed, 689 insertions(+), 179 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 219be31..1136907 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -39,21 +39,21 @@ import ( // https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 var sparseBounds = [][]float64{ // Schema "0": - []float64{0.5}, + {0.5}, // Schema 1: - []float64{0.5, 0.7071067811865475}, + {0.5, 0.7071067811865475}, // Schema 2: - []float64{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, // Schema 3: - []float64{0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + {0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711}, // Schema 4: - []float64{0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + {0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735}, // Schema 5: - []float64{0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + {0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, @@ -62,7 +62,7 @@ var sparseBounds = [][]float64{ 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999}, // Schema 6: - []float64{0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + {0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, @@ -79,7 +79,7 @@ var sparseBounds = [][]float64{ 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752}, // Schema 7: - []float64{0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + {0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, @@ -112,7 +112,7 @@ var sparseBounds = [][]float64{ 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328}, // Schema 8: - []float64{0.5, 0.5013556375251013, 
0.5027149505564014, 0.5040779490592088, + {0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, @@ -405,10 +405,27 @@ type HistogramOpts struct { // of making the zero value of HistogramOpts meaningful. Has to be // solved more elegantly in the final version.) SparseBucketsZeroThreshold float64 - // TODO(beorn7): Need a setting to limit total bucket count and to - // configure a strategy to enforce the limit, e.g. if minimum duration - // after last reset, reset. If not, half the resolution and/or expand - // the zero bucket. + + // The remaining fields define a strategy to limit the number of + // populated sparse buckets. If SparseBucketsMaxNumber is left at zero, + // the number of buckets is not limited. Otherwise, once the provided + // number is exceeded, the following strategy is enacted: First, if the + // last reset (or the creation) of the histogram is at least + // SparseBucketsMinResetDuration ago, then the whole histogram is reset + // to its initial state (including regular buckets). If less time has + // passed, or if SparseBucketsMinResetDuration is zero, no reset is + // performed. Instead, the zero threshold is increased sufficiently to + // reduce the number of buckets to or below SparseBucketsMaxNumber, but + // not to more than SparseBucketsMaxZeroThreshold. Thus, if + // SparseBucketsMaxZeroThreshold is already at or below the current zero + // threshold, nothing happens at this step. After that, if the number of + // buckets still exceeds SparseBucketsMaxNumber, the resolution of the + // histogram is reduced by doubling the width of the sparse buckets (up + // to a growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + SparseBucketsMaxNumber uint32 + SparseBucketsMinResetDuration time.Duration + SparseBucketsMaxZeroThreshold float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -446,11 +463,14 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + sparseMaxBuckets: opts.SparseBucketsMaxNumber, + sparseMaxZeroThreshold: opts.SparseBucketsMaxZeroThreshold, + sparseMinResetDuration: opts.SparseBucketsMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, } if len(h.upperBounds) == 0 && opts.SparseBucketsFactor <= 1 { h.upperBounds = DefBuckets @@ -460,9 +480,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } else { switch { case opts.SparseBucketsZeroThreshold > 0: - h.sparseThreshold = opts.SparseBucketsZeroThreshold + h.sparseZeroThreshold = opts.SparseBucketsZeroThreshold case opts.SparseBucketsZeroThreshold == 0: - h.sparseThreshold = DefSparseBucketsZeroThreshold + h.sparseZeroThreshold = DefSparseBucketsZeroThreshold } // Leave h.sparseThreshold at 0 otherwise. 
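
Taken together, the new limitation options might be configured as follows: once more than the allowed number of sparse buckets is populated, the histogram is reset if the last reset is long enough ago; otherwise the zero bucket is widened up to the configured maximum, and only then is the resolution halved. The sketch below is a hypothetical usage example built against this branch, with illustrative metric name and values (only the option names come from the patch):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        requestDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:                          "http_request_duration_seconds",
            Help:                          "A histogram of request durations.",
            SparseBucketsFactor:           1.1,
            SparseBucketsMaxNumber:        100,
            SparseBucketsMinResetDuration: time.Hour,
            SparseBucketsMaxZeroThreshold: 0.001,
        })
        prometheus.MustRegister(requestDuration)
        requestDuration.Observe(0.42)
    }
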
h.sparseSchema = pickSparseSchema(opts.SparseBucketsFactor) } @@ -483,8 +503,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + h.counts[0] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + sparseZeroThresholdBits: math.Float64bits(h.sparseZeroThreshold), + sparseSchema: h.sparseSchema, + } + h.counts[1] = &histogramCounts{ + buckets: make([]uint64, len(h.upperBounds)), + sparseZeroThresholdBits: math.Float64bits(h.sparseZeroThreshold), + sparseSchema: h.sparseSchema, + } h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. @@ -492,14 +520,32 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } type histogramCounts struct { + // Order in this struct matters for the alignment required by atomic + // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + // observations. sumBits uint64 count uint64 + + // sparseZeroBucket counts all (positive and negative) observations in + // the zero bucket (with an absolute value less or equal the current + // threshold, see next field. + sparseZeroBucket uint64 + // sparseZeroThresholdBits is the bit pattern of the current threshold + // for the zero bucket. It's initially equal to sparseZeroThreshold but + // may change according to the bucket count limitation strategy. + sparseZeroThresholdBits uint64 + // sparseSchema may change over time according to the bucket count + // limitation strategy and therefore has to be saved here. + sparseSchema int32 + // Number of (positive and negative) sparse buckets. + sparseBucketsNumber uint32 + + // Regular buckets. buckets []uint64 - // sparse buckets are implemented with a sync.Map for now. A dedicated + + // Sparse buckets are implemented with a sync.Map for now. A dedicated // data structure will likely be more efficient. There are separate maps // for negative and positive observations. The map's value is an *int64, // counting observations in that bucket. (Note that we don't use uint64 @@ -508,18 +554,12 @@ type histogramCounts struct { // map's key is the index of the bucket according to the used // sparseSchema. Index 0 is for an upper bound of 1. sparseBucketsPositive, sparseBucketsNegative sync.Map - // sparseZeroBucket counts all (positive and negative) observations in - // the zero bucket (with an absolute value less or equal - // SparseBucketsZeroThreshold). - sparseZeroBucket uint64 } // observe manages the parts of observe that only affects // histogramCounts. doSparse is true if spare buckets should be done, -// too. whichSparse is 0 for the sparseZeroBucket and +1 or -1 for -// sparseBucketsPositive or sparseBucketsNegative, respectively. sparseKey is -// the key of the sparse bucket to use. -func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool, whichSparse int, sparseKey int) { +// too. 
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if bucket < len(hc.buckets) { atomic.AddUint64(&hc.buckets[bucket], 1) } @@ -531,15 +571,36 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool, whichSp } } if doSparse { - switch whichSparse { - case 0: - atomic.AddUint64(&hc.sparseZeroBucket, 1) - case +1: - addToSparseBucket(&hc.sparseBucketsPositive, sparseKey, 1) - case -1: - addToSparseBucket(&hc.sparseBucketsNegative, sparseKey, 1) + var ( + sparseKey int + sparseSchema = atomic.LoadInt32(&hc.sparseSchema) + sparseZeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.sparseZeroThresholdBits)) + frac, exp = math.Frexp(math.Abs(v)) + bucketCreated bool + ) + switch { + case math.IsInf(v, 0): + sparseKey = math.MaxInt32 // Largest possible sparseKey. + case sparseSchema > 0: + bounds := sparseBounds[sparseSchema] + sparseKey = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) default: - panic(fmt.Errorf("invalid value for whichSparse: %d", whichSparse)) + sparseKey = exp + if frac == 0.5 { + sparseKey-- + } + sparseKey /= 1 << -sparseSchema + } + switch { + case v > sparseZeroThreshold: + bucketCreated = addToSparseBucket(&hc.sparseBucketsPositive, sparseKey, 1) + case v < -sparseZeroThreshold: + bucketCreated = addToSparseBucket(&hc.sparseBucketsNegative, sparseKey, 1) + default: + atomic.AddUint64(&hc.sparseZeroBucket, 1) + } + if bucketCreated { + atomic.AddUint32(&hc.sparseBucketsNumber, 1) } } // Increment count last as we take it as a signal that the observation @@ -547,21 +608,6 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool, whichSp atomic.AddUint64(&hc.count, 1) } -func addToSparseBucket(buckets *sync.Map, key int, increment int64) { - if existingBucket, ok := buckets.Load(key); ok { - // Fast path without allocation. - atomic.AddInt64(existingBucket.(*int64), increment) - return - } - // Bucket doesn't exist yet. Slow path allocating new counter. - newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. - if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { - // The bucket was created concurrently in another goroutine. - // Have to increment after all. - atomic.AddInt64(actualBucket.(*int64), increment) - } -} - type histogram struct { // countAndHotIdx enables lock-free writes with use of atomic updates. // The most significant bit is the hot index [0 or 1] of the count field @@ -582,8 +628,10 @@ type histogram struct { countAndHotIdx uint64 selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. + desc *Desc + + // Only used in the Write method and for sparse bucket management. + mtx sync.Mutex // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of @@ -591,11 +639,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - sparseSchema int32 // Set to math.MinInt32 if no sparse buckets are used. - sparseThreshold float64 + upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + sparseSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + sparseZeroThreshold float64 // The initial zero threshold. 
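
To make the key computation in observe above concrete, here is a standalone sketch for schema 2 (growth factor 2^(1/4) per bucket); upperBound is the schema-2 case of the getLe helper that appears later in this patch and acts as the inverse of the key mapping:

    package main

    import (
        "fmt"
        "math"
        "sort"
    )

    // Bucket boundaries for schema 2 (copied from sparseBounds[2] above).
    var bounds2 = []float64{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}

    // key computes the sparse bucket key for a positive, finite observation at
    // schema 2, mirroring the schema>0 branch in histogramCounts.observe.
    func key(v float64) int {
        frac, exp := math.Frexp(v)
        return sort.SearchFloat64s(bounds2, frac) + (exp-1)*len(bounds2)
    }

    // upperBound is the schema-2 case of getLe: the inclusive upper bound of the
    // bucket with the given key. Key 0 has an upper bound of exactly 1.
    func upperBound(k int) float64 {
        return math.Ldexp(bounds2[k&3], (k>>2)+1)
    }

    func main() {
        for _, v := range []float64{1, 1.5, 2, 100} {
            k := key(v)
            fmt.Printf("v=%g -> key %d, bucket (%.4g, %.4g]\n", v, k, upperBound(k-1), upperBound(k))
        }
        // v=1 -> key 0, bucket (0.8409, 1]
        // v=1.5 -> key 3, bucket (1.414, 1.682]
        // v=2 -> key 4, bucket (1.682, 2]
        // v=100 -> key 27, bucket (90.51, 107.6]
    }
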
+ sparseMaxZeroThreshold float64 + sparseMaxBuckets uint32 + sparseMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -619,8 +671,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -682,15 +734,15 @@ func (h *histogram) Write(out *dto.Metric) error { atomic.StoreUint64(&coldCounts.buckets[i], 0) } if h.sparseSchema > math.MinInt32 { - his.SbZeroThreshold = &h.sparseThreshold - his.SbSchema = &h.sparseSchema + his.SbZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sparseZeroThresholdBits))) + his.SbSchema = proto.Int32(atomic.LoadInt32(&coldCounts.sparseSchema)) zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) defer func() { atomic.AddUint64(&hotCounts.sparseZeroBucket, zeroBucket) atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) - coldCounts.sparseBucketsPositive.Range(addAndReset(&hotCounts.sparseBucketsPositive)) - coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative)) + coldCounts.sparseBucketsPositive.Range(addAndReset(&hotCounts.sparseBucketsPositive, &hotCounts.sparseBucketsNumber)) + coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative, &hotCounts.sparseBucketsNumber)) }() his.SbZeroCount = proto.Uint64(zeroBucket) @@ -700,72 +752,6 @@ func (h *histogram) Write(out *dto.Metric) error { return nil } -func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { - var ii []int - buckets.Range(func(k, v interface{}) bool { - ii = append(ii, k.(int)) - return true - }) - sort.Ints(ii) - - if len(ii) == 0 { - return nil - } - - sbs := dto.SparseBuckets{} - var prevCount int64 - var nextI int - - appendDelta := func(count int64) { - *sbs.Span[len(sbs.Span)-1].Length++ - sbs.Delta = append(sbs.Delta, count-prevCount) - prevCount = count - } - - for n, i := range ii { - v, _ := buckets.Load(i) - count := atomic.LoadInt64(v.(*int64)) - // Multiple spans with only small gaps in between are probably - // encoded more efficiently as one larger span with a few empty - // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create - // a new span. - iDelta := int32(i - nextI) - if n == 0 || iDelta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap - // of more than two buckets. - sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ - Offset: proto.Int32(iDelta), - Length: proto.Uint32(0), - }) - } else { - // We have found a small gap (or no gap at all). - // Insert empty buckets as needed. - for j := int32(0); j < iDelta; j++ { - appendDelta(0) - } - } - appendDelta(count) - nextI = i + 1 - } - return &sbs -} - -// addAndReset returns a function to be used with sync.Map.Range of spare -// buckets in coldCounts. It increments the buckets in the provided hotBuckets -// according to the buckets ranged through. It then resets all buckets ranged -// through to 0 (but leaves them in place so that they don't need to get -// recreated on the next scrape). 
-func addAndReset(hotBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - bucket := v.(*int64) - addToSparseBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) - atomic.StoreInt64(bucket, 0) - return true - } -} - // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { @@ -785,34 +771,235 @@ func (h *histogram) findBucket(v float64) int { func (h *histogram) observe(v float64, bucket int) { // Do not add to sparse buckets for NaN observations. doSparse := h.sparseSchema > math.MinInt32 && !math.IsNaN(v) - var whichSparse, sparseKey int - if doSparse { - switch { - case v > h.sparseThreshold: - whichSparse = +1 - case v < -h.sparseThreshold: - whichSparse = -1 - } - frac, exp := math.Frexp(math.Abs(v)) - switch { - case math.IsInf(v, 0): - sparseKey = math.MaxInt32 // Largest possible sparseKey. - case h.sparseSchema > 0: - bounds := sparseBounds[h.sparseSchema] - sparseKey = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) - default: - sparseKey = exp - if frac == 0.5 { - sparseKey-- - } - sparseKey /= 1 << -h.sparseSchema - } - } // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) - h.counts[n>>63].observe(v, bucket, doSparse, whichSparse, sparseKey) + hotCounts := h.counts[n>>63] + hotCounts.observe(v, bucket, doSparse) + if doSparse { + h.limitSparseBuckets(hotCounts, v, bucket) + } +} + +// limitSparsebuckets applies a strategy to limit the number of populated sparse +// buckets. It's generally best effort, and there are situations where the +// number can go higher (if even the lowest resolution isn't enough to reduce +// the number sufficiently, or if the provided counts aren't fully updated yet +// by a concurrently happening Write call). +func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, bucket int) { + if h.sparseMaxBuckets == 0 { + return // No limit configured. + } + if h.sparseMaxBuckets >= atomic.LoadUint32(&counts.sparseBucketsNumber) { + return // Bucket limit not exceeded yet. + } + + h.mtx.Lock() + defer h.mtx.Unlock() + + // The hot counts might have been swapped just before we acquired the + // lock. Re-fetch the hot counts first... + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hotCounts := h.counts[hotIdx] + coldCounts := h.counts[coldIdx] + // ...and then check again if we really have to reduce the bucket count. + if h.sparseMaxBuckets >= atomic.LoadUint32(&hotCounts.sparseBucketsNumber) { + return // Bucket limit not exceeded after all. + } + + // (1) Ideally, we can reset the whole histogram. + + // We are using the possibly mocked h.now() rather than + // time.Since(h.lastResetTime) to enable testing. + if h.sparseMinResetDuration > 0 && h.now().Sub(h.lastResetTime) >= h.sparseMinResetDuration { + // Completely reset coldCounts. + h.resetCounts(coldCounts) + // Repeat the latest observation to not lose it completely. + coldCounts.observe(value, bucket, true) + // Make coldCounts the new hot counts while ressetting countAndHotIdx. + n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + // Wait for the formerly hot counts to cool down. 
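
Both the Write path and the reset path here juggle countAndHotIdx, whose layout is documented on the histogram struct: the most significant bit selects the hot counts, the lower 63 bits hold the observation count. A plain, non-atomic illustration of that packing (the real code uses sync/atomic operations throughout):

    package main

    import "fmt"

    func main() {
        // countAndHotIdx packs the index of the hot counts into the most
        // significant bit and the observation count into the lower 63 bits.
        var countAndHotIdx uint64

        // Three observations: each increments only the lower 63 bits.
        countAndHotIdx += 3

        // A writer flips the hot index by adding 1<<63; the count is untouched.
        countAndHotIdx += 1 << 63

        hotIdx := countAndHotIdx >> 63
        coldIdx := (^countAndHotIdx) >> 63
        count := countAndHotIdx & ((1 << 63) - 1)
        fmt.Println(hotIdx, coldIdx, count) // 1 0 3
    }
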
+ for count != atomic.LoadUint64(&hotCounts.count) { + runtime.Gosched() // Let observations get work done. + } + // Finally, reset the formerly hot counts, too. + h.resetCounts(hotCounts) + h.lastResetTime = h.now() + return + } + + // (2) Try widening the zero bucket. + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hotCounts.sparseZeroThresholdBits)) + switch { // Use switch rather than if to be able to break out of it. + case h.sparseMaxZeroThreshold > currentZeroThreshold: + // Find the key of the bucket closest to zero. + smallestKey := findSmallestKey(&hotCounts.sparseBucketsPositive) + smallestNegativeKey := findSmallestKey(&hotCounts.sparseBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + break + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hotCounts.sparseSchema)) + if newZeroThreshold > h.sparseMaxZeroThreshold { + break // New threshold would exceed the max threshold. + } + atomic.StoreUint64(&coldCounts.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := coldCounts.sparseBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomic.AddUint32(&coldCounts.sparseBucketsNumber, ^uint32(0)) // Decrement, see https://pkg.go.dev/sync/atomic#AddUint32 + } + if _, loaded := coldCounts.sparseBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomic.AddUint32(&coldCounts.sparseBucketsNumber, ^uint32(0)) // Decrement, see https://pkg.go.dev/sync/atomic#AddUint32 + } + // Make coldCounts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hotCounts, coldCounts = coldCounts, hotCounts + // Wait for the (new) cold counts to cool down. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + // Add all the cold counts to the new hot counts, while merging + // the newly deleted buckets into the wider zero bucket, and + // reset and adjust the cold counts. + // TODO(beorn7): Maybe make it more DRY, cf. Write() method. Maybe + // it's too different, though... + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + hotBits := atomic.LoadUint64(&hotCounts.sumBits) + coldBits := atomic.LoadUint64(&coldCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(hotBits) + math.Float64frombits(coldBits)) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, hotBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + atomic.AddUint64(&hotCounts.sparseZeroBucket, atomic.LoadUint64(&coldCounts.sparseZeroBucket)) + atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) + atomic.StoreUint64(&coldCounts.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) + + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hotCounts.sparseZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. 
+ coldBuckets.Delete(key) + atomic.AddUint32(&coldCounts.sparseBucketsNumber, ^uint32(0)) // Decrement, see https://pkg.go.dev/sync/atomic#AddUint32 + } else { + // Add to corresponding hot bucket... + if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hotCounts.sparseBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true + } + } + + coldCounts.sparseBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hotCounts.sparseBucketsPositive, &coldCounts.sparseBucketsPositive)) + coldCounts.sparseBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hotCounts.sparseBucketsNegative, &coldCounts.sparseBucketsNegative)) + return + } + + // (3) Ultima ratio: Doubling of the bucket width AKA halving the resolution AKA decrementing sparseSchema. + coldSchema := atomic.LoadInt32(&coldCounts.sparseSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&coldCounts.sparseSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&coldCounts.sparseBucketsNumber, 0) + deleteSyncMap(&coldCounts.sparseBucketsNegative) + deleteSyncMap(&coldCounts.sparseBucketsPositive) + // Make coldCounts the new hot counts. + n = atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hotCounts, coldCounts = coldCounts, hotCounts + // Wait for the (new) cold counts to cool down. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + // Add all the cold counts to the new hot counts, while merging the cold + // buckets into the wider hot buckets, and reset and adjust the cold + // counts. + // TODO(beorn7): Maybe make it more DRY, cf. Write() method and code + // above. Maybe it's too different, though... + atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + hotBits := atomic.LoadUint64(&hotCounts.sumBits) + coldBits := atomic.LoadUint64(&coldCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(hotBits) + math.Float64frombits(coldBits)) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, hotBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + atomic.AddUint64(&hotCounts.sparseZeroBucket, atomic.LoadUint64(&coldCounts.sparseZeroBucket)) + atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) + + merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hotCounts.sparseBucketsNumber, 1) + } + return true + } + } + + coldCounts.sparseBucketsPositive.Range(merge(&hotCounts.sparseBucketsPositive)) + coldCounts.sparseBucketsNegative.Range(merge(&hotCounts.sparseBucketsNegative)) + atomic.StoreInt32(&coldCounts.sparseSchema, coldSchema) + // Play it simple again and just delete all cold buckets. 
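
The key adjustment in the merge closure above is compact but easy to get wrong; the following standalone sketch (mergeKey is illustrative only) makes the pairing explicit when the schema is decremented by one, i.e. when the bucket width doubles:

    package main

    import "fmt"

    // mergeKey mirrors the adjustment in the merge closure above: when the schema
    // is decremented (bucket width doubled), two adjacent buckets of the old
    // schema collapse into one bucket of the new schema.
    func mergeKey(key int) int {
        if key > 0 {
            key++
        }
        // Go's integer division truncates toward zero, which yields the right
        // pairing for zero and negative keys as well.
        return key / 2
    }

    func main() {
        for _, k := range []int{-4, -3, -2, -1, 0, 1, 2, 3, 4} {
            fmt.Printf("old key %2d -> new key %2d\n", k, mergeKey(k))
        }
        // old: -4 -3 -2 -1  0  1  2  3  4
        // new: -2 -1 -1  0  0  1  1  2  2
    }
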
+ atomic.StoreUint32(&coldCounts.sparseBucketsNumber, 0) + deleteSyncMap(&coldCounts.sparseBucketsNegative) + deleteSyncMap(&coldCounts.sparseBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.sparseZeroBucket, 0) + atomic.StoreUint64(&counts.sparseZeroThresholdBits, math.Float64bits(h.sparseZeroThreshold)) + atomic.StoreInt32(&counts.sparseSchema, h.sparseSchema) + atomic.StoreUint32(&counts.sparseBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.sparseBucketsNegative) + deleteSyncMap(&counts.sparseBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -1081,3 +1268,120 @@ func pickSparseSchema(bucketFactor float64) int32 { return -int32(floor) } } + +func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + + if len(ii) == 0 { + return nil + } + + sbs := dto.SparseBuckets{} + var prevCount int64 + var nextI int + + appendDelta := func(count int64) { + *sbs.Span[len(sbs.Span)-1].Length++ + sbs.Delta = append(sbs.Delta, count-prevCount) + prevCount = count + } + + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one ore two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. + sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return &sbs +} + +// addToSparseBucket increments the sparse bucket at key by the provided +// amount. It returns true if a new sparse bucket had to be created for that. +func addToSparseBucket(buckets *sync.Map, key int, increment int64) bool { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. + atomic.AddInt64(existingBucket.(*int64), increment) + return false + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddInt64(actualBucket.(*int64), increment) + return false + } + return true +} + +// addAndReset returns a function to be used with sync.Map.Range of spare +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). 
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*int64) + if addToSparseBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + atomic.AddUint32(bucketNumber, 1) + } + atomic.StoreInt64(bucket, 0) + return true + } +} + +func deleteSyncMap(m *sync.Map) { + m.Range(func(k, v interface{}) bool { + m.Delete(k) + return true + }) +} + +func findSmallestKey(m *sync.Map) int { + result := math.MaxInt32 + m.Range(func(k, v interface{}) bool { + key := k.(int) + if key < result { + result = key + } + return true + }) + return result +} + +func getLe(key int, schema int32) float64 { + if schema < 0 { + return math.Ldexp(1, key<<(-schema)) + } + + fracIdx := key & ((1 << schema) - 1) + frac := sparseBounds[schema][fracIdx] + exp := (key >> schema) + 1 + return math.Ldexp(frac, exp) +} diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index 9971d62..46e48c1 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -20,6 +20,7 @@ import ( "runtime" "sort" "sync" + "sync/atomic" "testing" "testing/quick" "time" @@ -167,7 +168,7 @@ func TestHistogramConcurrency(t *testing.T) { start.Add(1) end.Add(concLevel) - sum := NewHistogram(HistogramOpts{ + his := NewHistogram(HistogramOpts{ Name: "test_histogram", Help: "helpless", Buckets: testBuckets, @@ -188,9 +189,9 @@ func TestHistogramConcurrency(t *testing.T) { start.Wait() for _, v := range vals { if n%2 == 0 { - sum.Observe(v) + his.Observe(v) } else { - sum.(ExemplarObserver).ObserveWithExemplar(v, Labels{"foo": "bar"}) + his.(ExemplarObserver).ObserveWithExemplar(v, Labels{"foo": "bar"}) } } end.Done() @@ -201,7 +202,7 @@ func TestHistogramConcurrency(t *testing.T) { end.Wait() m := &dto.Metric{} - sum.Write(m) + his.Write(m) if got, want := int(*m.Histogram.SampleCount), total; got != want { t.Errorf("got sample count %d, want %d", got, want) } @@ -424,24 +425,24 @@ func TestHistogramExemplar(t *testing.T) { } expectedExemplars := []*dto.Exemplar{ nil, - &dto.Exemplar{ + { Label: []*dto.LabelPair{ - &dto.LabelPair{Name: proto.String("id"), Value: proto.String("2")}, + {Name: proto.String("id"), Value: proto.String("2")}, }, Value: proto.Float64(1.6), Timestamp: ts, }, nil, - &dto.Exemplar{ + { Label: []*dto.LabelPair{ - &dto.LabelPair{Name: proto.String("id"), Value: proto.String("3")}, + {Name: proto.String("id"), Value: proto.String("3")}, }, Value: proto.Float64(4), Timestamp: ts, }, - &dto.Exemplar{ + { Label: []*dto.LabelPair{ - &dto.LabelPair{Name: proto.String("id"), Value: proto.String("4")}, + {Name: proto.String("id"), Value: proto.String("4")}, }, Value: proto.Float64(4.5), Timestamp: ts, @@ -470,11 +471,14 @@ func TestHistogramExemplar(t *testing.T) { func TestSparseHistogram(t *testing.T) { scenarios := []struct { - name string - observations []float64 - factor float64 - zeroThreshold float64 - want string // String representation of protobuf. + name string + observations []float64 // With simulated interval of 1m. + factor float64 + zeroThreshold float64 + maxBuckets uint32 + minResetDuration time.Duration + maxZeroThreshold float64 + want string // String representation of protobuf. 
}{ { name: "no sparse buckets", @@ -531,18 +535,122 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, want: `sample_count:7 sample_sum:-inf sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 > sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, }, + { + name: "limited buckets but nothing triggered", + observations: []float64{0, 1, 1.2, 1.4, 1.8, 2}, + factor: 1.2, + maxBuckets: 4, + want: `sample_count:6 sample_sum:7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + { + name: "buckets limited by halving resolution", + observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 3}, + factor: 1.2, + maxBuckets: 4, + want: `sample_count:8 sample_sum:11.5 sb_schema:1 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:2 delta:-1 delta:-2 delta:1 > `, + }, + { + name: "buckets limited by widening the zero bucket", + observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 3}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + want: `sample_count:8 sample_sum:11.5 sb_schema:2 sb_zero_threshold:1 sb_zero_count:2 sb_positive: delta:1 delta:1 delta:-2 delta:2 delta:-2 delta:0 delta:1 > `, + }, + { + name: "buckets limited by widening the zero bucket twice", + observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 3, 4}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + want: `sample_count:9 sample_sum:15.5 sb_schema:2 sb_zero_threshold:1.189207115002721 sb_zero_count:3 sb_positive: delta:2 delta:-2 delta:2 delta:-2 delta:0 delta:1 delta:0 > `, + }, + { + name: "buckets limited by reset", + observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 3, 4}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + minResetDuration: 5 * time.Minute, + want: `sample_count:2 sample_sum:7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:1 delta:0 > `, + }, + { + name: "limited buckets but nothing triggered, negative observations", + observations: []float64{0, -1, -1.2, -1.4, -1.8, -2}, + factor: 1.2, + maxBuckets: 4, + want: `sample_count:6 sample_sum:-7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + }, + { + name: "buckets limited by halving resolution, negative observations", + observations: []float64{0, -1, -1.1, -1.2, -1.4, -1.8, -2, -3}, + factor: 1.2, + maxBuckets: 4, + want: `sample_count:8 sample_sum:-11.5 sb_schema:1 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:2 delta:-1 delta:-2 delta:1 > `, + }, + { + name: "buckets limited by widening the zero bucket, negative observations", + observations: []float64{0, -1, -1.1, -1.2, -1.4, -1.8, -2, -3}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + want: `sample_count:8 sample_sum:-11.5 sb_schema:2 sb_zero_threshold:1 sb_zero_count:2 sb_negative: delta:1 delta:1 delta:-2 delta:2 delta:-2 delta:0 delta:1 > `, + }, + { + name: "buckets limited by widening the zero bucket twice, negative observations", + observations: []float64{0, -1, -1.1, -1.2, -1.4, -1.8, -2, -3, -4}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + want: `sample_count:9 sample_sum:-15.5 sb_schema:2 sb_zero_threshold:1.189207115002721 sb_zero_count:3 sb_negative: delta:2 delta:-2 delta:2 delta:-2 delta:0 delta:1 delta:0 > `, + }, + { + name: "buckets limited by reset, negative observations", + observations: []float64{0, -1, -1.1, -1.2, -1.4, 
-1.8, -2, -3, -4}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + minResetDuration: 5 * time.Minute, + want: `sample_count:2 sample_sum:-7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_negative: delta:1 delta:0 > `, + }, + { + name: "buckets limited by halving resolution, then reset", + observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 5, 5.1, 3, 4}, + factor: 1.2, + maxBuckets: 4, + minResetDuration: 9 * time.Minute, + want: `sample_count:2 sample_sum:7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:1 delta:0 > `, + }, + { + name: "buckets limited by widening the zero bucket, then reset", + observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 5, 5.1, 3, 4}, + factor: 1.2, + maxBuckets: 4, + maxZeroThreshold: 1.2, + minResetDuration: 9 * time.Minute, + want: `sample_count:2 sample_sum:7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:1 delta:0 > `, + }, } for _, s := range scenarios { t.Run(s.name, func(t *testing.T) { his := NewHistogram(HistogramOpts{ - Name: "name", - Help: "help", - SparseBucketsFactor: s.factor, - SparseBucketsZeroThreshold: s.zeroThreshold, + Name: "name", + Help: "help", + SparseBucketsFactor: s.factor, + SparseBucketsZeroThreshold: s.zeroThreshold, + SparseBucketsMaxNumber: s.maxBuckets, + SparseBucketsMinResetDuration: s.minResetDuration, + SparseBucketsMaxZeroThreshold: s.maxZeroThreshold, }) + ts := time.Now().Add(30 * time.Second) + now := func() time.Time { + return ts + } + his.(*histogram).now = now for _, o := range s.observations { his.Observe(o) + ts = ts.Add(time.Minute) } m := &dto.Metric{} if err := his.Write(m); err != nil { @@ -556,3 +664,101 @@ func TestSparseHistogram(t *testing.T) { } } + +func TestSparseHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogram(HistogramOpts{ + Name: "test_sparse_histogram", + Help: "This help is sparse.", + SparseBucketsFactor: 1.05, + SparseBucketsZeroThreshold: 0.0000001, + SparseBucketsMaxNumber: 50, + SparseBucketsMinResetDuration: time.Hour, // Comment out to test for totals below. + SparseBucketsMaxZeroThreshold: 0.001, + }) + + ts := time.Now().Add(30 * time.Second).Unix() + now := func() time.Time { + return time.Unix(atomic.LoadInt64(&ts), 0) + } + his.(*histogram).now = now + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + // An observation every 1 to 10 seconds. + atomic.AddInt64(&ts, rand.Int63n(10)+1) + his.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + his.Write(m) + + // Uncomment these tests for totals only if you have disabled histogram resets above. 
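
The bucket checks further down in this test reconstruct absolute bucket populations from the deltas by accumulation. As a standalone sketch of that decoding, the inverse of the span/delta encoding shown with the Write hunk earlier:

    package main

    import "fmt"

    // decodeDeltas reconstructs absolute bucket populations from the delta
    // encoding, mirroring the accumulation in the checks of this test.
    func decodeDeltas(deltas []int64) []int64 {
        counts := make([]int64, 0, len(deltas))
        var current int64
        for _, d := range deltas {
            current += d
            counts = append(counts, current)
        }
        return counts
    }

    func main() {
        // The deltas from the encoding example earlier, [3 -2 -1 2], decode
        // back to the bucket populations 3, 1, 0, 2.
        fmt.Println(decodeDeltas([]int64{3, -2, -1, 2})) // [3 1 0 2]
    }
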
+ // + // if got, want := int(*m.Histogram.SampleCount), total; got != want { + // t.Errorf("got sample count %d, want %d", got, want) + // } + // if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + // t.Errorf("got sample sum %f, want %f", got, want) + // } + + sumBuckets := int(m.Histogram.GetSbZeroCount()) + current := 0 + for _, delta := range m.Histogram.GetSbNegative().GetDelta() { + current += int(delta) + if current < 0 { + t.Fatalf("negative bucket population negative: %d", current) + } + sumBuckets += current + } + current = 0 + for _, delta := range m.Histogram.GetSbPositive().GetDelta() { + current += int(delta) + if current < 0 { + t.Fatalf("positive bucket population negative: %d", current) + } + sumBuckets += current + } + if got, want := sumBuckets, int(*m.Histogram.SampleCount); got != want { + t.Errorf("got bucket population sum %d, want %d", got, want) + } + + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} From 263be8dab7a35a94356772cf193ba630f883deb1 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 31 Aug 2021 20:17:19 +0200 Subject: [PATCH 16/25] Refactoring of sparse histograms Signed-off-by: beorn7 --- prometheus/histogram.go | 334 ++++++++++++++++++++-------------------- 1 file changed, 165 insertions(+), 169 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 1136907..cd61b83 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -563,13 +563,7 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if bucket < len(hc.buckets) { atomic.AddUint64(&hc.buckets[bucket], 1) } - for { - oldBits := atomic.LoadUint64(&hc.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hc.sumBits, oldBits, newBits) { - break - } - } + atomicAddFloat(&hc.sumBits, v) if doSparse { var ( sparseKey int @@ -685,10 +679,7 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } + waitForCooldown(count, coldCounts) his := &dto.Histogram{ Bucket: make([]*dto.Bucket, len(h.upperBounds)), @@ -718,29 +709,12 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } - // Add all the cold counts to the new hot counts and reset the cold counts. 
- atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } if h.sparseSchema > math.MinInt32 { his.SbZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sparseZeroThresholdBits))) his.SbSchema = proto.Int32(atomic.LoadInt32(&coldCounts.sparseSchema)) zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) defer func() { - atomic.AddUint64(&hotCounts.sparseZeroBucket, zeroBucket) - atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) coldCounts.sparseBucketsPositive.Range(addAndReset(&hotCounts.sparseBucketsPositive, &hotCounts.sparseBucketsNumber)) coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative, &hotCounts.sparseBucketsNumber)) }() @@ -749,6 +723,7 @@ func (h *histogram) Write(out *dto.Metric) error { his.SbNegative = makeSparseBuckets(&coldCounts.sparseBucketsNegative) his.SbPositive = makeSparseBuckets(&coldCounts.sparseBucketsPositive) } + addAndResetCounts(hotCounts, coldCounts) return nil } @@ -809,159 +784,138 @@ func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, b if h.sparseMaxBuckets >= atomic.LoadUint32(&hotCounts.sparseBucketsNumber) { return // Bucket limit not exceeded after all. } + // Try the various strategies in order. + if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { + return + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { + return + } + h.doubleBucketWidth(hotCounts, coldCounts) +} - // (1) Ideally, we can reset the whole histogram. - +// maybyReset resests the whole histogram if at least h.sparseMinResetDuration +// has been passed. It returns true if the histogram has been reset. The caller +// must have locked h.mtx. +func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.sparseMinResetDuration > 0 && h.now().Sub(h.lastResetTime) >= h.sparseMinResetDuration { - // Completely reset coldCounts. - h.resetCounts(coldCounts) - // Repeat the latest observation to not lose it completely. - coldCounts.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. - n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) - count := n & ((1 << 63) - 1) - // Wait for the formerly hot counts to cool down. - for count != atomic.LoadUint64(&hotCounts.count) { - runtime.Gosched() // Let observations get work done. - } - // Finally, reset the formerly hot counts, too. - h.resetCounts(hotCounts) - h.lastResetTime = h.now() - return + if h.sparseMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.sparseMinResetDuration { + return false } + // Completely reset coldCounts. + h.resetCounts(cold) + // Repeat the latest observation to not lose it completely. + cold.observe(value, bucket, true) + // Make coldCounts the new hot counts while ressetting countAndHotIdx. 
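
This refactor replaces several inline loops with small helpers (atomicAddFloat, atomicDecUint32, waitForCooldown, addAndResetCounts) whose definitions are not part of the hunks shown here. Reconstructed from the inline code they replace, they presumably look roughly like the following sketch; treat the exact bodies as an assumption:

    package prometheus

    import (
        "math"
        "runtime"
        "sync/atomic"
    )

    // atomicAddFloat adds v to the float64 whose bit pattern is stored in *bits,
    // using the CAS loop the inline code used before this refactor.
    func atomicAddFloat(bits *uint64, v float64) {
        for {
            loadedBits := atomic.LoadUint64(bits)
            newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
            if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
                break
            }
        }
    }

    // atomicDecUint32 decrements p by one, see
    // https://pkg.go.dev/sync/atomic#AddUint32 for the ^uint32(0) trick.
    func atomicDecUint32(p *uint32) {
        atomic.AddUint32(p, ^uint32(0))
    }

    // waitForCooldown spins until counts has seen the given number of
    // observations, yielding so that in-flight observations can finish.
    func waitForCooldown(count uint64, counts *histogramCounts) {
        for count != atomic.LoadUint64(&counts.count) {
            runtime.Gosched() // Let observations get work done.
        }
    }

    // addAndResetCounts adds the non-sparse fields and the zero bucket of cold
    // into hot and resets them in cold, as the inline loops used to do.
    func addAndResetCounts(hot, cold *histogramCounts) {
        atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
        atomic.StoreUint64(&cold.count, 0)
        atomicAddFloat(&hot.sumBits, math.Float64frombits(atomic.LoadUint64(&cold.sumBits)))
        atomic.StoreUint64(&cold.sumBits, 0)
        for i := range hot.buckets {
            atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
            atomic.StoreUint64(&cold.buckets[i], 0)
        }
        atomic.AddUint64(&hot.sparseZeroBucket, atomic.LoadUint64(&cold.sparseZeroBucket))
        atomic.StoreUint64(&cold.sparseZeroBucket, 0)
    }
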
+ n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + return true +} - // (2) Try widening the zero bucket. - currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hotCounts.sparseZeroThresholdBits)) - switch { // Use switch rather than if to be able to break out of it. - case h.sparseMaxZeroThreshold > currentZeroThreshold: - // Find the key of the bucket closest to zero. - smallestKey := findSmallestKey(&hotCounts.sparseBucketsPositive) - smallestNegativeKey := findSmallestKey(&hotCounts.sparseBucketsNegative) - if smallestNegativeKey < smallestKey { - smallestKey = smallestNegativeKey - } - if smallestKey == math.MaxInt32 { - break - } - newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hotCounts.sparseSchema)) - if newZeroThreshold > h.sparseMaxZeroThreshold { - break // New threshold would exceed the max threshold. - } - atomic.StoreUint64(&coldCounts.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) - // Remove applicable buckets. - if _, loaded := coldCounts.sparseBucketsNegative.LoadAndDelete(smallestKey); loaded { - atomic.AddUint32(&coldCounts.sparseBucketsNumber, ^uint32(0)) // Decrement, see https://pkg.go.dev/sync/atomic#AddUint32 - } - if _, loaded := coldCounts.sparseBucketsPositive.LoadAndDelete(smallestKey); loaded { - atomic.AddUint32(&coldCounts.sparseBucketsNumber, ^uint32(0)) // Decrement, see https://pkg.go.dev/sync/atomic#AddUint32 - } - // Make coldCounts the new hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hotCounts, coldCounts = coldCounts, hotCounts - // Wait for the (new) cold counts to cool down. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - // Add all the cold counts to the new hot counts, while merging - // the newly deleted buckets into the wider zero bucket, and - // reset and adjust the cold counts. - // TODO(beorn7): Maybe make it more DRY, cf. Write() method. Maybe - // it's too different, though... - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - hotBits := atomic.LoadUint64(&hotCounts.sumBits) - coldBits := atomic.LoadUint64(&coldCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(hotBits) + math.Float64frombits(coldBits)) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, hotBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } - atomic.AddUint64(&hotCounts.sparseZeroBucket, atomic.LoadUint64(&coldCounts.sparseZeroBucket)) - atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) - atomic.StoreUint64(&coldCounts.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) - - mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - if key == smallestKey { - // Merge into hot zero bucket... - atomic.AddUint64(&hotCounts.sparseZeroBucket, uint64(atomic.LoadInt64(bucket))) - // ...and delete from cold counts. 
- coldBuckets.Delete(key) - atomic.AddUint32(&coldCounts.sparseBucketsNumber, ^uint32(0)) // Decrement, see https://pkg.go.dev/sync/atomic#AddUint32 - } else { - // Add to corresponding hot bucket... - if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hotCounts.sparseBucketsNumber, 1) - } - // ...and reset cold bucket. - atomic.StoreInt64(bucket, 0) +// maybeWidenZeroBucket widens the zero bucket until it includes the existing +// buckets closest to the zero bucket (which could be two, if an equidistant +// negative and a positive bucket exists, but usually it's only one bucket to be +// merged into the new wider zero bucket). h.sparseMaxZeroThreshold limits how +// far the zero bucket can be extended, and if that's not enough to include an +// existing bucket, the method returns false. The caller must have locked h.mtx. +func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.sparseZeroThresholdBits)) + if currentZeroThreshold >= h.sparseMaxZeroThreshold { + return false + } + // Find the key of the bucket closest to zero. + smallestKey := findSmallestKey(&hot.sparseBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.sparseBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + return false + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.sparseSchema)) + if newZeroThreshold > h.sparseMaxZeroThreshold { + return false // New threshold would exceed the max threshold. + } + atomic.StoreUint64(&cold.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.sparseBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.sparseBucketsNumber) + } + if _, loaded := cold.sparseBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.sparseBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.sparseZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.sparseBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.sparseBucketsNumber, 1) } - return true + // ...and reset cold bucket. 
+ atomic.StoreInt64(bucket, 0) } + return true } - - coldCounts.sparseBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hotCounts.sparseBucketsPositive, &coldCounts.sparseBucketsPositive)) - coldCounts.sparseBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hotCounts.sparseBucketsNegative, &coldCounts.sparseBucketsNegative)) - return } - // (3) Ultima ratio: Doubling of the bucket width AKA halving the resolution AKA decrementing sparseSchema. - coldSchema := atomic.LoadInt32(&coldCounts.sparseSchema) + cold.sparseBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.sparseBucketsPositive, &cold.sparseBucketsPositive)) + cold.sparseBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.sparseBucketsNegative, &cold.sparseBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.sparseSchema) if coldSchema == -4 { return // Already at lowest resolution. } coldSchema-- - atomic.StoreInt32(&coldCounts.sparseSchema, coldSchema) + atomic.StoreInt32(&cold.sparseSchema, coldSchema) // Play it simple and just delete all cold buckets. - atomic.StoreUint32(&coldCounts.sparseBucketsNumber, 0) - deleteSyncMap(&coldCounts.sparseBucketsNegative) - deleteSyncMap(&coldCounts.sparseBucketsPositive) + atomic.StoreUint32(&cold.sparseBucketsNumber, 0) + deleteSyncMap(&cold.sparseBucketsNegative) + deleteSyncMap(&cold.sparseBucketsPositive) // Make coldCounts the new hot counts. - n = atomic.AddUint64(&h.countAndHotIdx, 1<<63) + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) count := n & ((1 << 63) - 1) // Swap the pointer names to represent the new roles and make // the rest less confusing. - hotCounts, coldCounts = coldCounts, hotCounts - // Wait for the (new) cold counts to cool down. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - // Add all the cold counts to the new hot counts, while merging the cold - // buckets into the wider hot buckets, and reset and adjust the cold - // counts. - // TODO(beorn7): Maybe make it more DRY, cf. Write() method and code - // above. Maybe it's too different, though... - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - hotBits := atomic.LoadUint64(&hotCounts.sumBits) - coldBits := atomic.LoadUint64(&coldCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(hotBits) + math.Float64frombits(coldBits)) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, hotBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } - atomic.AddUint64(&hotCounts.sparseZeroBucket, atomic.LoadUint64(&coldCounts.sparseZeroBucket)) - atomic.StoreUint64(&coldCounts.sparseZeroBucket, 0) - + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.sparseSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. 
merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { return func(k, v interface{}) bool { key := k.(int) @@ -973,19 +927,18 @@ func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, b key /= 2 // Add to corresponding hot bucket. if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hotCounts.sparseBucketsNumber, 1) + atomic.AddUint32(&hot.sparseBucketsNumber, 1) } return true } } - coldCounts.sparseBucketsPositive.Range(merge(&hotCounts.sparseBucketsPositive)) - coldCounts.sparseBucketsNegative.Range(merge(&hotCounts.sparseBucketsNegative)) - atomic.StoreInt32(&coldCounts.sparseSchema, coldSchema) + cold.sparseBucketsPositive.Range(merge(&hot.sparseBucketsPositive)) + cold.sparseBucketsNegative.Range(merge(&hot.sparseBucketsNegative)) // Play it simple again and just delete all cold buckets. - atomic.StoreUint32(&coldCounts.sparseBucketsNumber, 0) - deleteSyncMap(&coldCounts.sparseBucketsNegative) - deleteSyncMap(&coldCounts.sparseBucketsPositive) + atomic.StoreUint32(&cold.sparseBucketsNumber, 0) + deleteSyncMap(&cold.sparseBucketsNegative) + deleteSyncMap(&cold.sparseBucketsPositive) } func (h *histogram) resetCounts(counts *histogramCounts) { @@ -1385,3 +1338,46 @@ func getLe(key int, schema int32) float64 { exp := (key >> schema) + 1 return math.Ldexp(frac, exp) } + +// waitForCooldown returns after the count field in the provided histogramCounts +// has reached the provided count value. +func waitForCooldown(count uint64, counts *histogramCounts) { + for count != atomic.LoadUint64(&counts.count) { + runtime.Gosched() // Let observations get work done. + } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, +// sparse zero bucket) from the cold counts to the corresponding fields in the +// hot counts. Those fields are then reset to 0 in the cold counts. 
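// waitForCooldown, atomicAddFloat, and addAndResetCounts (whose body
// follows) all serve the hot/cold double-buffering behind countAndHotIdx:
// its top bit selects the hot counts, its lower 63 bits count observations.
// A stripped-down sketch of that scheme, with a plain counter as the only
// payload (names here are illustrative, not the library's API):
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

type countPair struct {
	countAndHotIdx uint64    // top bit: hot index; low 63 bits: observations started
	counts         [2]uint64 // observations recorded per counter
}

func (p *countPair) observe() {
	n := atomic.AddUint64(&p.countAndHotIdx, 1) // count it and learn the hot index
	atomic.AddUint64(&p.counts[n>>63], 1)
}

// snapshot flips hot and cold, waits until every observation started before
// the flip has reached the now-cold counter (the cooldown), then folds the
// cold counter back into the new hot one, mirroring addAndResetCounts.
func (p *countPair) snapshot() uint64 {
	n := atomic.AddUint64(&p.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	hot, cold := &p.counts[n>>63], &p.counts[(^n)>>63]
	for count != atomic.LoadUint64(cold) {
		runtime.Gosched() // let in-flight observations finish
	}
	atomic.AddUint64(hot, atomic.LoadUint64(cold))
	atomic.StoreUint64(cold, 0)
	return count
}

func main() {
	var p countPair
	var wg sync.WaitGroup
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				p.observe()
			}
		}()
	}
	wg.Wait()
	fmt.Println(p.snapshot()) // 4000
}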
+func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.sparseZeroBucket, atomic.LoadUint64(&cold.sparseZeroBucket)) + atomic.StoreUint64(&cold.sparseZeroBucket, 0) +} From 70253f4dd027a7128cdd681c22448d65ca30eed7 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 11 Jan 2022 14:07:18 +0100 Subject: [PATCH 17/25] Fix typo in doc comment Signed-off-by: beorn7 --- prometheus/histogram.go | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index cd61b83..771053e 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -45,24 +45,31 @@ var sparseBounds = [][]float64{ // Schema 2: {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, // Schema 3: - {0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, - 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711}, + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, // Schema 4: - {0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, - 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735}, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, // Schema 5: - {0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, - 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999}, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, // Schema 6: - {0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, @@ -77,9 +84,11 @@ var sparseBounds = [][]float64{ 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, - 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752}, + 
0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, // Schema 7: - {0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, @@ -110,9 +119,11 @@ var sparseBounds = [][]float64{ 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, - 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328}, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, // Schema 8: - {0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, @@ -175,7 +186,8 @@ var sparseBounds = [][]float64{ 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, - 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698}, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, } // The sparseBounds above can be generated with the code below. @@ -794,7 +806,7 @@ func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, b h.doubleBucketWidth(hotCounts, coldCounts) } -// maybyReset resests the whole histogram if at least h.sparseMinResetDuration +// maybeReset resests the whole histogram if at least h.sparseMinResetDuration // has been passed. It returns true if the histogram has been reset. The caller // must have locked h.mtx. func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { From a27b6d74f6b1b711b3c5b3d8088057c83be9fdc4 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Fri, 13 May 2022 10:59:26 +0200 Subject: [PATCH 18/25] Fix conflicts Signed-off-by: Kemal Akkoyun --- prometheus/gen_go_collector_metrics_set.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/prometheus/gen_go_collector_metrics_set.go b/prometheus/gen_go_collector_metrics_set.go index c6c7704..23b2f01 100644 --- a/prometheus/gen_go_collector_metrics_set.go +++ b/prometheus/gen_go_collector_metrics_set.go @@ -38,15 +38,8 @@ func main() { log.Fatal("requires Go version (e.g. 
go1.17) as an argument") } toolVersion := runtime.Version() -<<<<<<< HEAD if majorVersion := toolVersion[:strings.LastIndexByte(toolVersion, '.')]; majorVersion != os.Args[1] { log.Fatalf("using Go version %q but expected Go version %q", majorVersion, os.Args[1]) -======= - mtv := majorVersion(toolVersion) - mv := majorVersion(os.Args[1]) - if mtv != mv { - log.Fatalf("using Go version %q but expected Go version %q", mtv, mv) ->>>>>>> f251146 (prometheus: Fix convention violating names for generated collector metrics (#1048)) } version, err := parseVersion(mv) if err != nil { From eb59a7b3d7fc6db434780d6af357dd658cffd406 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Rabenstein?= Date: Sun, 15 May 2022 20:22:30 +0200 Subject: [PATCH 19/25] Histogram: Fix bug with negative schemas (#1054) * Histogram: Expose bug with negative schema Signed-off-by: beorn7 * Histogram: Fix bug with negative schemas Signed-off-by: beorn7 --- prometheus/histogram.go | 3 ++- prometheus/histogram_test.go | 26 +++++++++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index e921585..fea0142 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -595,7 +595,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if frac == 0.5 { sparseKey-- } - sparseKey /= 1 << -sparseSchema + div := 1 << -sparseSchema + sparseKey = (sparseKey + div - 1) / div } switch { case v > sparseZeroThreshold: diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index 75a355a..81803d5 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -355,7 +355,8 @@ func TestBuckets(t *testing.T) { } got = ExponentialBucketsRange(1, 100, 10) - want = []float64{1.0, 1.6681005372000588, 2.782559402207125, + want = []float64{ + 1.0, 1.6681005372000588, 2.782559402207125, 4.641588833612779, 7.742636826811273, 12.915496650148842, 21.544346900318846, 35.93813663804629, 59.94842503189414, 100.00000000000007, @@ -469,7 +470,6 @@ func TestHistogramExemplar(t *testing.T) { } func TestSparseHistogram(t *testing.T) { - scenarios := []struct { name string observations []float64 // With simulated interval of 1m. 
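The one-line change above replaces a plain integer division of the exponent by 2^-schema with a division that rounds up. A small standalone sketch for schema -1 (bucket factor 4, the upper bound of key k being 4^k) shows why that matters: with plain division, observations such as 2 or 5 land one bucket too low.

package main

import (
	"fmt"
	"math"
)

// keyAtSchemaMinus1 mimics the index calculation in observe for schema -1.
// With ceil=false it uses the old, plain division; with ceil=true it rounds
// up as the fix does.
func keyAtSchemaMinus1(v float64, ceil bool) int {
	frac, exp := math.Frexp(math.Abs(v))
	key := exp
	if frac == 0.5 {
		key-- // exact powers of two belong to the lower bucket
	}
	div := 1 << 1 // 1 << -schema for schema == -1
	if ceil {
		return (key + div - 1) / div
	}
	return key / div
}

func main() {
	for _, v := range []float64{2, 3, 5, 16, 33.33} {
		old := keyAtSchemaMinus1(v, false)
		fixed := keyAtSchemaMinus1(v, true)
		fmt.Printf("v=%-6g old: key %d (le %g)   fixed: key %d (le %g)\n",
			v, old, math.Pow(4, float64(old)), fixed, math.Pow(4, float64(fixed)))
	}
	// The output shows 2 and 5 mis-bucketed by the old formula (upper bounds
	// 1 and 4), while the fixed keys give the correct upper bounds 4 and 16.
}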
@@ -498,6 +498,27 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, want: `sample_count:6 sample_sum:7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, }, + { + name: "factor 4 results in schema -1", + observations: []float64{ + 0.5, 1, // Bucket 0: (0.25, 1] + 1.5, 2, 3, 3.5, // Bucket 1: (1, 4] + 5, 6, 7, // Bucket 2: (4, 16] + 33.33, // Bucket 3: (16, 64] + }, + factor: 4, + want: `sample_count:10 sample_sum:62.83 sb_schema:-1 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:2 delta:2 delta:-1 delta:-2 > `, + }, + { + name: "factor 17 results in schema -2", + observations: []float64{ + 0.5, 1, // Bucket 0: (0.0625, 1] + 1.5, 2, 3, 3.5, 5, 6, 7, // Bucket 1: (1, 16] + 33.33, // Bucket 2: (16, 256] + }, + factor: 17, + want: `sample_count:10 sample_sum:62.83 sb_schema:-2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:2 delta:5 delta:-6 > `, + }, { name: "negative buckets", observations: []float64{0, -1, -1.2, -1.4, -1.8, -2}, @@ -662,7 +683,6 @@ func TestSparseHistogram(t *testing.T) { } }) } - } func TestSparseHistogramConcurrency(t *testing.T) { From 8cbcd4076ad216b82223282b9bdad9d2aa98794f Mon Sep 17 00:00:00 2001 From: beorn7 Date: Tue, 19 Jul 2022 16:47:44 +0200 Subject: [PATCH 20/25] histograms: Move to new exposition protobuf format Note that this is an incompatible change. To scrape this new format, the Prometheus server needs to be updated at the same time. PR incoming. Signed-off-by: beorn7 --- go.mod | 2 +- go.sum | 4 +-- prometheus/histogram.go | 58 +++++++++++++++++++----------------- prometheus/histogram_test.go | 50 +++++++++++++++---------------- 4 files changed, 59 insertions(+), 55 deletions(-) diff --git a/go.mod b/go.mod index 24d9fa6..be69e3a 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/golang/protobuf v1.5.2 github.com/json-iterator/go v1.1.12 - github.com/prometheus/client_model v0.2.1-0.20210624201024-61b6c1aac064 + github.com/prometheus/client_model v0.2.1-0.20220719122737-1f8dcad1221e github.com/prometheus/common v0.35.0 github.com/prometheus/procfs v0.7.3 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a diff --git a/go.sum b/go.sum index dc6ec52..9e02d7f 100644 --- a/go.sum +++ b/go.sum @@ -135,8 +135,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.1-0.20210624201024-61b6c1aac064 h1:Kyx21CLOfWDA4e2TcOcupRl2g/Bmddu0AL0hR1BldEw= -github.com/prometheus/client_model v0.2.1-0.20210624201024-61b6c1aac064/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.2.1-0.20220719122737-1f8dcad1221e h1:KjoQdMEQmNC8smQ731iHAXnbFbApg4uu60fNcWHs3Bk= +github.com/prometheus/client_model v0.2.1-0.20220719122737-1f8dcad1221e/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.35.0 h1:Eyr+Pw2VymWejHqCugNaQXkAi6KayVNxaHeu6khmFBE= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= diff --git 
a/prometheus/histogram.go b/prometheus/histogram.go index fea0142..eb80fad 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -382,19 +382,20 @@ type HistogramOpts struct { Buckets []float64 // If SparseBucketsFactor is greater than one, sparse buckets are used - // (in addition to the regular buckets, if defined above). Sparse - // buckets are exponential buckets covering the whole float64 range - // (with the exception of the “zero” bucket, see - // SparseBucketsZeroThreshold below). From any one bucket to the next, - // the width of the bucket grows by a constant factor. - // SparseBucketsFactor provides an upper bound for this factor - // (exception see below). The smaller SparseBucketsFactor, the more - // buckets will be used and thus the more costly the histogram will - // become. A generally good trade-off between cost and accuracy is a - // value of 1.1 (each bucket is at most 10% wider than the previous - // one), which will result in each power of two divided into 8 buckets - // (e.g. there will be 8 buckets between 1 and 2, same as between 2 and - // 4, and 4 and 8, etc.). + // (in addition to the regular buckets, if defined above). A histogram + // with sparse buckets will be ingested as a native histogram by a + // Prometheus server with that feature enable. Sparse buckets are + // exponential buckets covering the whole float64 range (with the + // exception of the “zero” bucket, see SparseBucketsZeroThreshold + // below). From any one bucket to the next, the width of the bucket + // grows by a constant factor. SparseBucketsFactor provides an upper + // bound for this factor (exception see below). The smaller + // SparseBucketsFactor, the more buckets will be used and thus the more + // costly the histogram will become. A generally good trade-off between + // cost and accuracy is a value of 1.1 (each bucket is at most 10% wider + // than the previous one), which will result in each power of two + // divided into 8 buckets (e.g. there will be 8 buckets between 1 and 2, + // same as between 2 and 4, and 4 and 8, etc.). 
// // Details about the actually used factor: The factor is calculated as // 2^(2^n), where n is an integer number between (and including) -8 and @@ -723,8 +724,8 @@ func (h *histogram) Write(out *dto.Metric) error { his.Bucket = append(his.Bucket, b) } if h.sparseSchema > math.MinInt32 { - his.SbZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sparseZeroThresholdBits))) - his.SbSchema = proto.Int32(atomic.LoadInt32(&coldCounts.sparseSchema)) + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sparseZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.sparseSchema)) zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) defer func() { @@ -732,9 +733,9 @@ func (h *histogram) Write(out *dto.Metric) error { coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative, &hotCounts.sparseBucketsNumber)) }() - his.SbZeroCount = proto.Uint64(zeroBucket) - his.SbNegative = makeSparseBuckets(&coldCounts.sparseBucketsNegative) - his.SbPositive = makeSparseBuckets(&coldCounts.sparseBucketsPositive) + his.ZeroCount = proto.Uint64(zeroBucket) + his.NegativeSpan, his.NegativeDelta = makeSparseBuckets(&coldCounts.sparseBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeSparseBuckets(&coldCounts.sparseBucketsPositive) } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1235,7 +1236,7 @@ func pickSparseSchema(bucketFactor float64) int32 { } } -func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { +func makeSparseBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { var ii []int buckets.Range(func(k, v interface{}) bool { ii = append(ii, k.(int)) @@ -1244,16 +1245,19 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { sort.Ints(ii) if len(ii) == 0 { - return nil + return nil, nil } - sbs := dto.SparseBuckets{} - var prevCount int64 - var nextI int + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) appendDelta := func(count int64) { - *sbs.Span[len(sbs.Span)-1].Length++ - sbs.Delta = append(sbs.Delta, count-prevCount) + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) prevCount = count } @@ -1270,7 +1274,7 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { // We have to create a new span, either because we are // at the very beginning, or because we have found a gap // of more than two buckets. 
- sbs.Span = append(sbs.Span, &dto.SparseBuckets_Span{ + spans = append(spans, &dto.BucketSpan{ Offset: proto.Int32(iDelta), Length: proto.Uint32(0), }) @@ -1284,7 +1288,7 @@ func makeSparseBuckets(buckets *sync.Map) *dto.SparseBuckets { appendDelta(count) nextI = i + 1 } - return &sbs + return spans, deltas } // addToSparseBucket increments the sparse bucket at key by the provided diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index 81803d5..5b26fb4 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -490,13 +490,13 @@ func TestSparseHistogram(t *testing.T) { name: "factor 1.1 results in schema 3", observations: []float64{0, 1, 2, 3}, factor: 1.1, - want: `sample_count:4 sample_sum:6 sb_schema:3 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: span: span: delta:1 delta:0 delta:0 > `, + want: `sample_count:4 sample_sum:6 schema:3 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_span: positive_span: positive_delta:1 positive_delta:0 positive_delta:0 `, }, { name: "factor 1.2 results in schema 2", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2}, factor: 1.2, - want: `sample_count:6 sample_sum:7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:6 sample_sum:7.4 schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, }, { name: "factor 4 results in schema -1", @@ -507,7 +507,7 @@ func TestSparseHistogram(t *testing.T) { 33.33, // Bucket 3: (16, 64] }, factor: 4, - want: `sample_count:10 sample_sum:62.83 sb_schema:-1 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:2 delta:2 delta:-1 delta:-2 > `, + want: `sample_count:10 sample_sum:62.83 schema:-1 zero_threshold:2.938735877055719e-39 zero_count:0 positive_span: positive_delta:2 positive_delta:2 positive_delta:-1 positive_delta:-2 `, }, { name: "factor 17 results in schema -2", @@ -517,58 +517,58 @@ func TestSparseHistogram(t *testing.T) { 33.33, // Bucket 2: (16, 256] }, factor: 17, - want: `sample_count:10 sample_sum:62.83 sb_schema:-2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:2 delta:5 delta:-6 > `, + want: `sample_count:10 sample_sum:62.83 schema:-2 zero_threshold:2.938735877055719e-39 zero_count:0 positive_span: positive_delta:2 positive_delta:5 positive_delta:-6 `, }, { name: "negative buckets", observations: []float64{0, -1, -1.2, -1.4, -1.8, -2}, factor: 1.2, - want: `sample_count:6 sample_sum:-7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:6 sample_sum:-7.4 schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 negative_delta:-1 negative_delta:2 negative_delta:-2 negative_delta:2 `, }, { name: "negative and positive buckets", observations: []float64{0, -1, -1.2, -1.4, -1.8, -2, 1, 1.2, 1.4, 1.8, 2}, factor: 1.2, - want: `sample_count:11 sample_sum:0 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:-1 delta:2 delta:-2 delta:2 > sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:11 sample_sum:0 schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 negative_delta:-1 negative_delta:2 negative_delta:-2 negative_delta:2 
positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, }, { name: "wide zero bucket", observations: []float64{0, -1, -1.2, -1.4, -1.8, -2, 1, 1.2, 1.4, 1.8, 2}, factor: 1.2, zeroThreshold: 1.4, - want: `sample_count:11 sample_sum:0 sb_schema:2 sb_zero_threshold:1.4 sb_zero_count:7 sb_negative: delta:2 > sb_positive: delta:2 > `, + want: `sample_count:11 sample_sum:0 schema:2 zero_threshold:1.4 zero_count:7 negative_span: negative_delta:2 positive_span: positive_delta:2 `, }, { name: "NaN observation", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.NaN()}, factor: 1.2, - want: `sample_count:7 sample_sum:nan sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:7 sample_sum:nan schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, }, { name: "+Inf observation", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.Inf(+1)}, factor: 1.2, - want: `sample_count:7 sample_sum:inf sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: span: delta:1 delta:-1 delta:2 delta:-2 delta:2 delta:-1 > `, + want: `sample_count:7 sample_sum:inf schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 positive_delta:-1 `, }, { name: "-Inf observation", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.Inf(-1)}, factor: 1.2, - want: `sample_count:7 sample_sum:-inf sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 > sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:7 sample_sum:-inf schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, }, { name: "limited buckets but nothing triggered", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2}, factor: 1.2, maxBuckets: 4, - want: `sample_count:6 sample_sum:7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:6 sample_sum:7.4 schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, }, { name: "buckets limited by halving resolution", observations: []float64{0, 1, 1.1, 1.2, 1.4, 1.8, 2, 3}, factor: 1.2, maxBuckets: 4, - want: `sample_count:8 sample_sum:11.5 sb_schema:1 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_positive: delta:1 delta:2 delta:-1 delta:-2 delta:1 > `, + want: `sample_count:8 sample_sum:11.5 schema:1 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_delta:1 positive_delta:2 positive_delta:-1 positive_delta:-2 positive_delta:1 `, }, { name: "buckets limited by widening the zero bucket", @@ -576,7 +576,7 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, maxBuckets: 4, maxZeroThreshold: 1.2, - want: `sample_count:8 sample_sum:11.5 sb_schema:2 sb_zero_threshold:1 sb_zero_count:2 sb_positive: delta:1 delta:1 delta:-2 delta:2 delta:-2 delta:0 delta:1 > `, + want: `sample_count:8 sample_sum:11.5 schema:2 zero_threshold:1 zero_count:2 positive_span: positive_delta:1 positive_delta:1 positive_delta:-2 
positive_delta:2 positive_delta:-2 positive_delta:0 positive_delta:1 `, }, { name: "buckets limited by widening the zero bucket twice", @@ -584,7 +584,7 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, maxBuckets: 4, maxZeroThreshold: 1.2, - want: `sample_count:9 sample_sum:15.5 sb_schema:2 sb_zero_threshold:1.189207115002721 sb_zero_count:3 sb_positive: delta:2 delta:-2 delta:2 delta:-2 delta:0 delta:1 delta:0 > `, + want: `sample_count:9 sample_sum:15.5 schema:2 zero_threshold:1.189207115002721 zero_count:3 positive_span: positive_delta:2 positive_delta:-2 positive_delta:2 positive_delta:-2 positive_delta:0 positive_delta:1 positive_delta:0 `, }, { name: "buckets limited by reset", @@ -593,21 +593,21 @@ func TestSparseHistogram(t *testing.T) { maxBuckets: 4, maxZeroThreshold: 1.2, minResetDuration: 5 * time.Minute, - want: `sample_count:2 sample_sum:7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:1 delta:0 > `, + want: `sample_count:2 sample_sum:7 schema:2 zero_threshold:2.938735877055719e-39 zero_count:0 positive_span: positive_delta:1 positive_delta:0 `, }, { name: "limited buckets but nothing triggered, negative observations", observations: []float64{0, -1, -1.2, -1.4, -1.8, -2}, factor: 1.2, maxBuckets: 4, - want: `sample_count:6 sample_sum:-7.4 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:-1 delta:2 delta:-2 delta:2 > `, + want: `sample_count:6 sample_sum:-7.4 schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 negative_delta:-1 negative_delta:2 negative_delta:-2 negative_delta:2 `, }, { name: "buckets limited by halving resolution, negative observations", observations: []float64{0, -1, -1.1, -1.2, -1.4, -1.8, -2, -3}, factor: 1.2, maxBuckets: 4, - want: `sample_count:8 sample_sum:-11.5 sb_schema:1 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:1 sb_negative: delta:1 delta:2 delta:-1 delta:-2 delta:1 > `, + want: `sample_count:8 sample_sum:-11.5 schema:1 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 negative_delta:2 negative_delta:-1 negative_delta:-2 negative_delta:1 `, }, { name: "buckets limited by widening the zero bucket, negative observations", @@ -615,7 +615,7 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, maxBuckets: 4, maxZeroThreshold: 1.2, - want: `sample_count:8 sample_sum:-11.5 sb_schema:2 sb_zero_threshold:1 sb_zero_count:2 sb_negative: delta:1 delta:1 delta:-2 delta:2 delta:-2 delta:0 delta:1 > `, + want: `sample_count:8 sample_sum:-11.5 schema:2 zero_threshold:1 zero_count:2 negative_span: negative_delta:1 negative_delta:1 negative_delta:-2 negative_delta:2 negative_delta:-2 negative_delta:0 negative_delta:1 `, }, { name: "buckets limited by widening the zero bucket twice, negative observations", @@ -623,7 +623,7 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, maxBuckets: 4, maxZeroThreshold: 1.2, - want: `sample_count:9 sample_sum:-15.5 sb_schema:2 sb_zero_threshold:1.189207115002721 sb_zero_count:3 sb_negative: delta:2 delta:-2 delta:2 delta:-2 delta:0 delta:1 delta:0 > `, + want: `sample_count:9 sample_sum:-15.5 schema:2 zero_threshold:1.189207115002721 zero_count:3 negative_span: negative_delta:2 negative_delta:-2 negative_delta:2 negative_delta:-2 negative_delta:0 negative_delta:1 negative_delta:0 `, }, { name: "buckets limited by reset, negative observations", @@ -632,7 +632,7 @@ func TestSparseHistogram(t *testing.T) { maxBuckets: 4, maxZeroThreshold: 1.2, 
minResetDuration: 5 * time.Minute, - want: `sample_count:2 sample_sum:-7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_negative: delta:1 delta:0 > `, + want: `sample_count:2 sample_sum:-7 schema:2 zero_threshold:2.938735877055719e-39 zero_count:0 negative_span: negative_delta:1 negative_delta:0 `, }, { name: "buckets limited by halving resolution, then reset", @@ -640,7 +640,7 @@ func TestSparseHistogram(t *testing.T) { factor: 1.2, maxBuckets: 4, minResetDuration: 9 * time.Minute, - want: `sample_count:2 sample_sum:7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:1 delta:0 > `, + want: `sample_count:2 sample_sum:7 schema:2 zero_threshold:2.938735877055719e-39 zero_count:0 positive_span: positive_delta:1 positive_delta:0 `, }, { name: "buckets limited by widening the zero bucket, then reset", @@ -649,7 +649,7 @@ func TestSparseHistogram(t *testing.T) { maxBuckets: 4, maxZeroThreshold: 1.2, minResetDuration: 9 * time.Minute, - want: `sample_count:2 sample_sum:7 sb_schema:2 sb_zero_threshold:2.938735877055719e-39 sb_zero_count:0 sb_positive: delta:1 delta:0 > `, + want: `sample_count:2 sample_sum:7 schema:2 zero_threshold:2.938735877055719e-39 zero_count:0 positive_span: positive_delta:1 positive_delta:0 `, }, } @@ -754,9 +754,9 @@ func TestSparseHistogramConcurrency(t *testing.T) { // t.Errorf("got sample sum %f, want %f", got, want) // } - sumBuckets := int(m.Histogram.GetSbZeroCount()) + sumBuckets := int(m.Histogram.GetZeroCount()) current := 0 - for _, delta := range m.Histogram.GetSbNegative().GetDelta() { + for _, delta := range m.Histogram.GetNegativeDelta() { current += int(delta) if current < 0 { t.Fatalf("negative bucket population negative: %d", current) @@ -764,7 +764,7 @@ func TestSparseHistogramConcurrency(t *testing.T) { sumBuckets += current } current = 0 - for _, delta := range m.Histogram.GetSbPositive().GetDelta() { + for _, delta := range m.Histogram.GetPositiveDelta() { current += int(delta) if current < 0 { t.Fatalf("positive bucket population negative: %d", current) From 6942f9e454fc3ecc27dbcedf8e6d2ae234918fb5 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 5 Oct 2022 15:37:48 +0200 Subject: [PATCH 21/25] sparse buckets: Fix handling of +Inf/-Inf/NaN observations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NaN observations now go to no bucket, but increment count (and effectively set sum to NaN, too). ±Inf observations now go to the bucket following the bucket that would have received math.MaxFloat64. The former is now the last bucket that can be created. The getLe is modified to return math.MaxFloat64 for the penultimate possible bucket. Also add a test for getLe. 
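As the concurrency test above already does for the deltas, a consumer of the span/delta encoding can recover absolute bucket counts by keeping running sums. A rough decoding sketch, using plain values instead of the protobuf pointer fields and assuming the deltas cover exactly the buckets described by the spans:

package main

import "fmt"

// span mirrors a BucketSpan: offset is the gap to the previous span (or the
// starting key for the first span), length the number of consecutive buckets.
type span struct {
	offset int32
	length uint32
}

// decodeBuckets expands spans plus delta-encoded counts into absolute counts
// per bucket key, inverting what makeSparseBuckets produces.
func decodeBuckets(spans []span, deltas []int64) map[int]int64 {
	buckets := map[int]int64{}
	key, i := 0, 0
	var count int64
	for _, s := range spans {
		key += int(s.offset)
		for j := uint32(0); j < s.length; j++ {
			count += deltas[i] // each delta is relative to the previous bucket's count
			buckets[key] = count
			key++
			i++
		}
	}
	return buckets
}

func main() {
	// Buckets at keys 1, 2, and 6 with counts 2, 1, and 5.
	spans := []span{{offset: 1, length: 2}, {offset: 3, length: 1}}
	deltas := []int64{2, -1, 4}
	fmt.Println(decodeBuckets(spans, deltas)) // map[1:2 2:1 6:5]
}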
Signed-off-by: beorn7 --- prometheus/histogram.go | 85 ++++++++++++++++++++++++++------ prometheus/histogram_test.go | 95 +++++++++++++++++++++++++++++++++++- 2 files changed, 162 insertions(+), 18 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index eb80fad..6665328 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -577,21 +577,27 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { atomic.AddUint64(&hc.buckets[bucket], 1) } atomicAddFloat(&hc.sumBits, v) - if doSparse { + if doSparse && !math.IsNaN(v) { var ( - sparseKey int - sparseSchema = atomic.LoadInt32(&hc.sparseSchema) - sparseZeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.sparseZeroThresholdBits)) - frac, exp = math.Frexp(math.Abs(v)) - bucketCreated bool + sparseKey int + sparseSchema = atomic.LoadInt32(&hc.sparseSchema) + sparseZeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.sparseZeroThresholdBits)) + bucketCreated, isInf bool ) - switch { - case math.IsInf(v, 0): - sparseKey = math.MaxInt32 // Largest possible sparseKey. - case sparseSchema > 0: + if math.IsInf(v, 0) { + // Pretend v is MaxFloat64 but later increment sparseKey by one. + if math.IsInf(v, +1) { + v = math.MaxFloat64 + } else { + v = -math.MaxFloat64 + } + isInf = true + } + frac, exp := math.Frexp(math.Abs(v)) + if sparseSchema > 0 { bounds := sparseBounds[sparseSchema] sparseKey = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) - default: + } else { sparseKey = exp if frac == 0.5 { sparseKey-- @@ -599,6 +605,9 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { div := 1 << -sparseSchema sparseKey = (sparseKey + div - 1) / div } + if isInf { + sparseKey++ + } switch { case v > sparseZeroThreshold: bucketCreated = addToSparseBucket(&hc.sparseBucketsPositive, sparseKey, 1) @@ -1062,7 +1071,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -1073,7 +1083,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -1219,8 +1230,8 @@ func (s buckSort) Less(i, j int) bool { // 2^(2^-n) is less or equal the provided bucketFactor. // // Special cases: -// - bucketFactor <= 1: panics. -// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. 
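// The special cases above, together with the "factor 4 results in schema -1"
// and "factor 17 results in schema -2" test expectations earlier in this
// series, pin the mapping from bucket factor to schema down to
// -floor(log2(log2(factor))), clamped to the supported range of -4 to 8.
// A sketch consistent with that behavior (not necessarily the literal
// implementation):
package main

import (
	"fmt"
	"math"
)

func pickSchema(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		panic("bucketFactor must be greater than 1")
	}
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8 // factors between 1 and 2^(2^-8) still get the finest schema
	case floor >= 4:
		return -4 // factors of 65536 and beyond get the coarsest schema
	default:
		return -int32(floor)
	}
}

func main() {
	for _, f := range []float64{1.1, 1.2, 2, 4, 17} {
		s := pickSchema(f)
		fmt.Printf("factor %-4g -> schema %d (actual factor %g)\n",
			f, s, math.Pow(2, math.Pow(2, float64(-s))))
	}
}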
func pickSparseSchema(bucketFactor float64) int32 { if bucketFactor <= 1 { panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) @@ -1346,13 +1357,55 @@ func findSmallestKey(m *sync.Map) int { } func getLe(key int, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with a key + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its key + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. if schema < 0 { - return math.Ldexp(1, key<<(-schema)) + exp := key << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) } fracIdx := key & ((1 << schema) - 1) frac := sparseBounds[schema][fracIdx] exp := (key >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. 
+ return math.MaxFloat64 + } return math.Ldexp(frac, exp) } diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index 5b26fb4..fa80249 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -548,13 +548,13 @@ func TestSparseHistogram(t *testing.T) { name: "+Inf observation", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.Inf(+1)}, factor: 1.2, - want: `sample_count:7 sample_sum:inf schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 positive_delta:-1 `, + want: `sample_count:7 sample_sum:inf schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 positive_span: positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 positive_delta:-1 `, }, { name: "-Inf observation", observations: []float64{0, 1, 1.2, 1.4, 1.8, 2, math.Inf(-1)}, factor: 1.2, - want: `sample_count:7 sample_sum:-inf schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, + want: `sample_count:7 sample_sum:-inf schema:2 zero_threshold:2.938735877055719e-39 zero_count:1 negative_span: negative_delta:1 positive_span: positive_delta:1 positive_delta:-1 positive_delta:2 positive_delta:-2 positive_delta:2 `, }, { name: "limited buckets but nothing triggered", @@ -782,3 +782,94 @@ func TestSparseHistogramConcurrency(t *testing.T) { t.Error(err) } } + +func TestGetLe(t *testing.T) { + scenarios := []struct { + key int + schema int32 + want float64 + }{ + { + key: -1, + schema: -1, + want: 0.25, + }, + { + key: 0, + schema: -1, + want: 1, + }, + { + key: 1, + schema: -1, + want: 4, + }, + { + key: 512, + schema: -1, + want: math.MaxFloat64, + }, + { + key: 513, + schema: -1, + want: math.Inf(+1), + }, + { + key: -1, + schema: 0, + want: 0.5, + }, + { + key: 0, + schema: 0, + want: 1, + }, + { + key: 1, + schema: 0, + want: 2, + }, + { + key: 1024, + schema: 0, + want: math.MaxFloat64, + }, + { + key: 1025, + schema: 0, + want: math.Inf(+1), + }, + { + key: -1, + schema: 2, + want: 0.8408964152537144, + }, + { + key: 0, + schema: 2, + want: 1, + }, + { + key: 1, + schema: 2, + want: 1.189207115002721, + }, + { + key: 4096, + schema: 2, + want: math.MaxFloat64, + }, + { + key: 4097, + schema: 2, + want: math.Inf(+1), + }, + } + + for i, s := range scenarios { + got := getLe(s.key, s.schema) + if s.want != got { + t.Errorf("%d. key %d, schema %d, want upper bound of %g, got %g", i, s.key, s.schema, s.want, got) + } + } +} From 4e71e6ff20034b5b046e4e2a3f627eb9aff9085e Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 19 Oct 2022 18:14:37 +0200 Subject: [PATCH 22/25] Update prometheus/client_model dependency Native histograms are now in a tagged version (v0.3.0). 
Signed-off-by: beorn7
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index a43b433..fe48634 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1
 	github.com/golang/protobuf v1.5.2
 	github.com/json-iterator/go v1.1.12
-	github.com/prometheus/client_model v0.2.1-0.20220719122737-1f8dcad1221e
+	github.com/prometheus/client_model v0.3.0
 	github.com/prometheus/common v0.37.0
 	github.com/prometheus/procfs v0.8.0
 	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a

diff --git a/go.sum b/go.sum
index 1761cf7..44e3901 100644
--- a/go.sum
+++ b/go.sum
@@ -135,8 +135,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.1-0.20220719122737-1f8dcad1221e h1:KjoQdMEQmNC8smQ731iHAXnbFbApg4uu60fNcWHs3Bk=
-github.com/prometheus/client_model v0.2.1-0.20220719122737-1f8dcad1221e/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
 github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=

From 58a8ca4588dbaa27f1b39e7d1ff9e6e1683d00bb Mon Sep 17 00:00:00 2001
From: beorn7
Date: Wed, 19 Oct 2022 18:38:57 +0200
Subject: [PATCH 23/25] examples: Adjust doc comment for native histograms

Signed-off-by: beorn7
---
 examples/random/main.go | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/examples/random/main.go b/examples/random/main.go
index 72f1f65..1329514 100644
--- a/examples/random/main.go
+++ b/examples/random/main.go
@@ -53,10 +53,17 @@ func main() {
 		},
 		[]string{"service"},
 	)
-	// The same as above, but now as a histogram, and only for the normal
-	// distribution. The buckets are targeted to the parameters of the
-	// normal distribution, with 20 buckets centered on the mean, each
-	// half-sigma wide.
+	// The same as above, but now as a histogram, and only for the
+	// normal distribution. The histogram features both conventional
+	// buckets as well as sparse buckets, the latter needed for the
+	// experimental native histograms (ingested by a Prometheus
+	// server v2.40 with the corresponding feature flag
+	// enabled). The conventional buckets are targeted to the
+	// parameters of the normal distribution, with 20 buckets
+	// centered on the mean, each half-sigma wide. The sparse
+	// buckets are always centered on zero, with a growth factor of
+	// one bucket to the next of (at most) 1.1. (The precise factor
+	// is 2^2^-3 = 1.0905077...)
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "rpc_durations_histogram_seconds", Help: "RPC latency distributions.", From d31f13b599f6c46507468385a60571c2a4916c41 Mon Sep 17 00:00:00 2001 From: beorn7 Date: Wed, 19 Oct 2022 19:02:43 +0200 Subject: [PATCH 24/25] Add SparseBucketsZeroThresholdZero and groom doc comments Signed-off-by: beorn7 --- prometheus/histogram.go | 109 ++++++++++++++++++++++++---------------- 1 file changed, 65 insertions(+), 44 deletions(-) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 309534f..9452d4e 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -217,18 +217,22 @@ var sparseBounds = [][]float64{ // } // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of +// configurable buckets. Similar to a Summary, it also provides a sum of // observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure so-called sparse buckets in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) +// +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -238,7 +242,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -261,14 +266,19 @@ var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} // which is a bucket boundary at all possible resolutions. const DefSparseBucketsZeroThreshold = 2.938735877055719e-39 +// SparseBucketsZeroThresholdZero can be used as SparseBucketsZeroThreshold in +// the HistogramOpts to create a zero bucket of width zero, i.e. a zero bucket +// that only receives observations of precisely zero. +const SparseBucketsZeroThresholdZero = -1 + var errBucketLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in histograms", bucketLabel, ) -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. 
The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. func LinearBuckets(start, width float64, count int) []float64 { @@ -283,11 +293,11 @@ func LinearBuckets(start, width float64, count int) []float64 { return buckets } -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket +// has an upper bound of 'start' and each following bucket's upper bound is +// 'factor' times the previous bucket's upper bound. The final +Inf bucket is +// not counted and not included in the returned slice. The returned slice is +// meant to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, // or if 'factor' is less than or equal 1. @@ -382,20 +392,21 @@ type HistogramOpts struct { Buckets []float64 // If SparseBucketsFactor is greater than one, sparse buckets are used - // (in addition to the regular buckets, if defined above). A histogram - // with sparse buckets will be ingested as a native histogram by a - // Prometheus server with that feature enable. Sparse buckets are - // exponential buckets covering the whole float64 range (with the - // exception of the “zero” bucket, see SparseBucketsZeroThreshold - // below). From any one bucket to the next, the width of the bucket - // grows by a constant factor. SparseBucketsFactor provides an upper - // bound for this factor (exception see below). The smaller - // SparseBucketsFactor, the more buckets will be used and thus the more - // costly the histogram will become. A generally good trade-off between - // cost and accuracy is a value of 1.1 (each bucket is at most 10% wider - // than the previous one), which will result in each power of two - // divided into 8 buckets (e.g. there will be 8 buckets between 1 and 2, - // same as between 2 and 4, and 4 and 8, etc.). + // (in addition to the regular buckets, if defined above). A Histogram + // with sparse buckets will be ingested as a Native Histogram by a + // Prometheus server with that feature enabled (requires Prometheus + // v2.40+). Sparse buckets are exponential buckets covering the whole + // float64 range (with the exception of the “zero” bucket, see + // SparseBucketsZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant + // factor. SparseBucketsFactor provides an upper bound for this factor + // (exception see below). The smaller SparseBucketsFactor, the more + // buckets will be used and thus the more costly the histogram will + // become. 
A generally good trade-off between cost and accuracy is a + // value of 1.1 (each bucket is at most 10% wider than the previous + // one), which will result in each power of two divided into 8 buckets + // (e.g. there will be 8 buckets between 1 and 2, same as between 2 and + // 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as // 2^(2^n), where n is an integer number between (and including) -8 and @@ -405,28 +416,38 @@ type HistogramOpts struct { // SparseBucketsFactor is greater than 1 but smaller than 2^(2^-8), then // the actually used factor is still 2^(2^-8) even though it is larger // than the provided SparseBucketsFactor. + // + // NOTE: Native Histograms are still an experimental feature. Their + // behavior might still change without a major version + // bump. Subsequently, all SparseBucket... options here might still + // change their behavior or name (or might completely disappear) without + // a major version bump. SparseBucketsFactor float64 // All observations with an absolute value of less or equal // SparseBucketsZeroThreshold are accumulated into a “zero” bucket. For // best results, this should be close to a bucket boundary. This is // usually the case if picking a power of two. If // SparseBucketsZeroThreshold is left at zero, - // DefSparseBucketsZeroThreshold is used as the threshold. If it is set - // to a negative value, a threshold of zero is used, i.e. only - // observations of precisely zero will go into the zero - // bucket. (TODO(beorn7): That's obviously weird and just a consequence - // of making the zero value of HistogramOpts meaningful. Has to be - // solved more elegantly in the final version.) + // DefSparseBucketsZeroThreshold is used as the threshold. To configure + // a zero bucket with an actual threshold of zero (i.e. only + // observations of precisely zero will go into the zero bucket), set + // SparseBucketsZeroThreshold to the SparseBucketsZeroThresholdZero + // constant (or any negative float value). SparseBucketsZeroThreshold float64 // The remaining fields define a strategy to limit the number of // populated sparse buckets. If SparseBucketsMaxNumber is left at zero, - // the number of buckets is not limited. Otherwise, once the provided - // number is exceeded, the following strategy is enacted: First, if the - // last reset (or the creation) of the histogram is at least - // SparseBucketsMinResetDuration ago, then the whole histogram is reset - // to its initial state (including regular buckets). If less time has - // passed, or if SparseBucketsMinResetDuration is zero, no reset is + // the number of buckets is not limited. (Note that this might lead to + // unbounded memory consumption if the values observed by the Histogram + // are sufficiently wide-spread. In particular, this could be used as a + // DoS attack vector. Where the observed values depend on external + // inputs, it is highly recommended to set a SparseBucketsMaxNumber.) + // Once the set SparseBucketsMaxNumber is exceeded, the following + // strategy is enacted: First, if the last reset (or the creation) of + // the histogram is at least SparseBucketsMinResetDuration ago, then the + // whole histogram is reset to its initial state (including regular + // buckets). If less time has passed, or if + // SparseBucketsMinResetDuration is zero, no reset is // performed. 
Instead, the zero threshold is increased sufficiently to // reduce the number of buckets to or below SparseBucketsMaxNumber, but // not to more than SparseBucketsMaxZeroThreshold. Thus, if From e92a8c7f4836260163f82ba84f33b62c2787af2d Mon Sep 17 00:00:00 2001 From: beorn7 Date: Thu, 27 Oct 2022 22:31:38 +0200 Subject: [PATCH 25/25] Avoid the term 'sparse' where possible This intends to avoid confusing users by the subtle difference between a native histogram and a sparse bucket. Signed-off-by: beorn7 --- examples/random/main.go | 8 +- prometheus/histogram.go | 440 ++++++++++++++++++----------------- prometheus/histogram_test.go | 28 +-- 3 files changed, 243 insertions(+), 233 deletions(-) diff --git a/examples/random/main.go b/examples/random/main.go index 1329514..13cd8bb 100644 --- a/examples/random/main.go +++ b/examples/random/main.go @@ -65,10 +65,10 @@ func main() { // one bucket to the text of (at most) 1.1. (The precise factor // is 2^2^-3 = 1.0905077...) rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "rpc_durations_histogram_seconds", - Help: "RPC latency distributions.", - Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), - SparseBucketsFactor: 1.1, + Name: "rpc_durations_histogram_seconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + NativeHistogramBucketFactor: 1.1, }) ) diff --git a/prometheus/histogram.go b/prometheus/histogram.go index 9452d4e..4c873a0 100644 --- a/prometheus/histogram.go +++ b/prometheus/histogram.go @@ -28,16 +28,16 @@ import ( dto "github.com/prometheus/client_model/go" ) -// sparseBounds for the frac of observed values. Only relevant for schema > 0. -// Position in the slice is the schema. (0 is never used, just here for -// convenience of using the schema directly as the index.) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) // // TODO(beorn7): Currently, we do a binary search into these slices. There are // ways to turn it into a small number of simple array lookups. It probably only // matters for schema 5 and beyond, but should be investigated. See this comment // as a starting point: // https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 -var sparseBounds = [][]float64{ +var nativeHistogramBounds = [][]float64{ // Schema "0": {0.5}, // Schema 1: @@ -190,35 +190,40 @@ var sparseBounds = [][]float64{ }, } -// The sparseBounds above can be generated with the code below. -// TODO(beorn7): Actually do it via go generate. +// The nativeHistogramBounds above can be generated with the code below. // -// var sparseBounds [][]float64 = make([][]float64, 9) +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) // // func init() { -// // Populate sparseBounds. +// // Populate nativeHistogramBounds. 
// numBuckets := 1 -// for i := range sparseBounds { +// for i := range nativeHistogramBounds { // bounds := []float64{0.5} // factor := math.Exp2(math.Exp2(float64(-i))) // for j := 0; j < numBuckets-1; j++ { // var bound float64 // if (j+1)%2 == 0 { // // Use previously calculated value for increased precision. -// bound = sparseBounds[i-1][j/2+1] +// bound = nativeHistogramBounds[i-1][j/2+1] // } else { // bound = bounds[j] * factor // } // bounds = append(bounds, bound) // } // numBuckets *= 2 -// sparseBounds[i] = bounds +// nativeHistogramBounds[i] = bounds // } // } // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a Summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using // the histogram_quantile PromQL function. @@ -227,7 +232,7 @@ var sparseBounds = [][]float64{ // (see the documentation for detailed procedures). However, Histograms require // the user to pre-define suitable buckets, and they are in general less // accurate. (Both problems are addressed by the experimental Native -// Histograms. To use them, configure so-called sparse buckets in the +// Histograms. To use them, configure a NativeHistogramBucketFactor in the // HistogramOpts. They also require a Prometheus server v2.40+ with the // corresponding feature flag enabled.) // @@ -259,17 +264,17 @@ const bucketLabel = "le" // customized to your use case. var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} -// DefSparseBucketsZeroThreshold is the default value for -// SparseBucketsZeroThreshold in the HistogramOpts. +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. // // The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), // which is a bucket boundary at all possible resolutions. -const DefSparseBucketsZeroThreshold = 2.938735877055719e-39 +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 -// SparseBucketsZeroThresholdZero can be used as SparseBucketsZeroThreshold in -// the HistogramOpts to create a zero bucket of width zero, i.e. a zero bucket -// that only receives observations of precisely zero. -const SparseBucketsZeroThresholdZero = -1 +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 var errBucketLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in histograms", bucketLabel, @@ -385,81 +390,83 @@ type HistogramOpts struct { // to add a highest bucket with +Inf bound, it will be added // implicitly. If Buckets is left as nil or set to a slice of length // zero, it is replaced by default buckets. The default buckets are - // DefBuckets if no sparse buckets (see below) are used, otherwise the - // default is no buckets. (In other words, if you want to use both - // reguler buckets and sparse buckets, you have to define the regular - // buckets here explicitly.) 
+ // DefBuckets if no buckets for a native histogram (see below) are used, + // otherwise the default is no buckets. (In other words, if you want to + // use both reguler buckets and buckets for a native histogram, you have + // to define the regular buckets here explicitly.) Buckets []float64 - // If SparseBucketsFactor is greater than one, sparse buckets are used - // (in addition to the regular buckets, if defined above). A Histogram - // with sparse buckets will be ingested as a Native Histogram by a - // Prometheus server with that feature enabled (requires Prometheus - // v2.40+). Sparse buckets are exponential buckets covering the whole - // float64 range (with the exception of the “zero” bucket, see + // If NativeHistogramBucketFactor is greater than one, so-called sparse + // buckets are used (in addition to the regular buckets, if defined + // above). A Histogram with sparse buckets will be ingested as a Native + // Histogram by a Prometheus server with that feature enabled (requires + // Prometheus v2.40+). Sparse buckets are exponential buckets covering + // the whole float64 range (with the exception of the “zero” bucket, see // SparseBucketsZeroThreshold below). From any one bucket to the next, // the width of the bucket grows by a constant - // factor. SparseBucketsFactor provides an upper bound for this factor - // (exception see below). The smaller SparseBucketsFactor, the more - // buckets will be used and thus the more costly the histogram will - // become. A generally good trade-off between cost and accuracy is a - // value of 1.1 (each bucket is at most 10% wider than the previous - // one), which will result in each power of two divided into 8 buckets - // (e.g. there will be 8 buckets between 1 and 2, same as between 2 and - // 4, and 4 and 8, etc.). + // factor. NativeHistogramBucketFactor provides an upper bound for this + // factor (exception see below). The smaller + // NativeHistogramBucketFactor, the more buckets will be used and thus + // the more costly the histogram will become. A generally good trade-off + // between cost and accuracy is a value of 1.1 (each bucket is at most + // 10% wider than the previous one), which will result in each power of + // two divided into 8 buckets (e.g. there will be 8 buckets between 1 + // and 2, same as between 2 and 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as // 2^(2^n), where n is an integer number between (and including) -8 and // 4. n is chosen so that the resulting factor is the largest that is - // still smaller or equal to SparseBucketsFactor. Note that the smallest - // possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) ). If - // SparseBucketsFactor is greater than 1 but smaller than 2^(2^-8), then - // the actually used factor is still 2^(2^-8) even though it is larger - // than the provided SparseBucketsFactor. + // still smaller or equal to NativeHistogramBucketFactor. Note that the + // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) + // ). If NativeHistogramBucketFactor is greater than 1 but smaller than + // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though + // it is larger than the provided NativeHistogramBucketFactor. // // NOTE: Native Histograms are still an experimental feature. Their // behavior might still change without a major version - // bump. Subsequently, all SparseBucket... options here might still + // bump. Subsequently, all NativeHistogram... 
options here might still // change their behavior or name (or might completely disappear) without // a major version bump. - SparseBucketsFactor float64 + NativeHistogramBucketFactor float64 // All observations with an absolute value of less or equal - // SparseBucketsZeroThreshold are accumulated into a “zero” bucket. For - // best results, this should be close to a bucket boundary. This is - // usually the case if picking a power of two. If - // SparseBucketsZeroThreshold is left at zero, + // NativeHistogramZeroThreshold are accumulated into a “zero” + // bucket. For best results, this should be close to a bucket + // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold is left at zero, // DefSparseBucketsZeroThreshold is used as the threshold. To configure // a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set - // SparseBucketsZeroThreshold to the SparseBucketsZeroThresholdZero + // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // constant (or any negative float value). - SparseBucketsZeroThreshold float64 + NativeHistogramZeroThreshold float64 // The remaining fields define a strategy to limit the number of - // populated sparse buckets. If SparseBucketsMaxNumber is left at zero, - // the number of buckets is not limited. (Note that this might lead to - // unbounded memory consumption if the values observed by the Histogram - // are sufficiently wide-spread. In particular, this could be used as a - // DoS attack vector. Where the observed values depend on external - // inputs, it is highly recommended to set a SparseBucketsMaxNumber.) - // Once the set SparseBucketsMaxNumber is exceeded, the following - // strategy is enacted: First, if the last reset (or the creation) of - // the histogram is at least SparseBucketsMinResetDuration ago, then the - // whole histogram is reset to its initial state (including regular + // populated sparse buckets. If NativeHistogramMaxBucketNumber is left + // at zero, the number of buckets is not limited. (Note that this might + // lead to unbounded memory consumption if the values observed by the + // Histogram are sufficiently wide-spread. In particular, this could be + // used as a DoS attack vector. Where the observed values depend on + // external inputs, it is highly recommended to set a + // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: First, if the last reset (or the creation) of the histogram + // is at least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular // buckets). If less time has passed, or if - // SparseBucketsMinResetDuration is zero, no reset is + // NativeHistogramMinResetDuration is zero, no reset is // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below SparseBucketsMaxNumber, but - // not to more than SparseBucketsMaxZeroThreshold. Thus, if - // SparseBucketsMaxZeroThreshold is already at or below the current zero - // threshold, nothing happens at this step. After that, if the number of - // buckets still exceeds SparseBucketsMaxNumber, the resolution of the - // histogram is reduced by doubling the width of the sparse buckets (up - // to a growth factor between one bucket to the next of 2^(2^4) = 65536, - // see above). 
- SparseBucketsMaxNumber uint32 - SparseBucketsMinResetDuration time.Duration - SparseBucketsMaxZeroThreshold float64 + // reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. After that, if the + // number of buckets still exceeds NativeHistogramMaxBucketNumber, the + // resolution of the histogram is reduced by doubling the width of the + // sparse buckets (up to a growth factor between one bucket to the next + // of 2^(2^4) = 65536, see above). + NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -497,28 +504,28 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - sparseMaxBuckets: opts.SparseBucketsMaxNumber, - sparseMaxZeroThreshold: opts.SparseBucketsMaxZeroThreshold, - sparseMinResetDuration: opts.SparseBucketsMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: time.Now(), + now: time.Now, } - if len(h.upperBounds) == 0 && opts.SparseBucketsFactor <= 1 { + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets } - if opts.SparseBucketsFactor <= 1 { - h.sparseSchema = math.MinInt32 // To mark that there are no sparse buckets. + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. } else { switch { - case opts.SparseBucketsZeroThreshold > 0: - h.sparseZeroThreshold = opts.SparseBucketsZeroThreshold - case opts.SparseBucketsZeroThreshold == 0: - h.sparseZeroThreshold = DefSparseBucketsZeroThreshold - } // Leave h.sparseThreshold at 0 otherwise. - h.sparseSchema = pickSparseSchema(opts.SparseBucketsFactor) + case opts.NativeHistogramZeroThreshold > 0: + h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold + case opts.NativeHistogramZeroThreshold == 0: + h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold + } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. 
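As a rough illustration of what the pickSchema call right below boils down to, per the HistogramOpts documentation above (the factor actually used is 2^(2^n) with n between -8 and 4, chosen as the largest such factor that still fits under the requested NativeHistogramBucketFactor), here is a hedged standalone sketch. It is derived from the documented rule rather than copied from the library, and the function name and sample factors are illustrative only:

    package main

    import (
        "fmt"
        "math"
    )

    // schemaFor sketches the documented rule: use the largest factor of the
    // form 2^(2^n), n in [-8, 4] (i.e. schema -n in [-4, 8]), that is still
    // <= the requested bucket factor. Requires bucketFactor > 1.
    func schemaFor(bucketFactor float64) int {
        floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
        switch {
        case floor <= -8:
            return 8 // Finest supported resolution, factor ~1.00271.
        case floor >= 4:
            return -4 // Coarsest supported resolution, factor 65536.
        default:
            return -int(floor)
        }
    }

    func main() {
        for _, f := range []float64{1.001, 1.1, 1.5, 2, 16, 1e6} {
            s := schemaFor(f)
            fmt.Printf("requested %-8g -> schema %2d, actual factor %g\n",
                f, s, math.Exp2(math.Exp2(float64(-s))))
        }
    }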
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -538,14 +545,14 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: h.counts[0] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - sparseZeroThresholdBits: math.Float64bits(h.sparseZeroThreshold), - sparseSchema: h.sparseSchema, + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, } h.counts[1] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - sparseZeroThresholdBits: math.Float64bits(h.sparseZeroThreshold), - sparseSchema: h.sparseSchema, + buckets: make([]uint64, len(h.upperBounds)), + nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), + nativeHistogramSchema: h.nativeHistogramSchema, } h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) @@ -562,36 +569,38 @@ type histogramCounts struct { sumBits uint64 count uint64 - // sparseZeroBucket counts all (positive and negative) observations in - // the zero bucket (with an absolute value less or equal the current - // threshold, see next field. - sparseZeroBucket uint64 - // sparseZeroThresholdBits is the bit pattern of the current threshold - // for the zero bucket. It's initially equal to sparseZeroThreshold but - // may change according to the bucket count limitation strategy. - sparseZeroThresholdBits uint64 - // sparseSchema may change over time according to the bucket count - // limitation strategy and therefore has to be saved here. - sparseSchema int32 + // nativeHistogramZeroBucket counts all (positive and negative) + // observations in the zero bucket (with an absolute value less or equal + // the current threshold, see next field. + nativeHistogramZeroBucket uint64 + // nativeHistogramZeroThresholdBits is the bit pattern of the current + // threshold for the zero bucket. It's initially equal to + // nativeHistogramZeroThreshold but may change according to the bucket + // count limitation strategy. + nativeHistogramZeroThresholdBits uint64 + // nativeHistogramSchema may change over time according to the bucket + // count limitation strategy and therefore has to be saved here. + nativeHistogramSchema int32 // Number of (positive and negative) sparse buckets. - sparseBucketsNumber uint32 + nativeHistogramBucketsNumber uint32 // Regular buckets. buckets []uint64 - // Sparse buckets are implemented with a sync.Map for now. A dedicated - // data structure will likely be more efficient. There are separate maps - // for negative and positive observations. The map's value is an *int64, - // counting observations in that bucket. (Note that we don't use uint64 - // as an int64 won't overflow in practice, and working with signed - // numbers from the beginning simplifies the handling of deltas.) The - // map's key is the index of the bucket according to the used - // sparseSchema. Index 0 is for an upper bound of 1. - sparseBucketsPositive, sparseBucketsNegative sync.Map + // The sparse buckets for native histograms are implemented with a + // sync.Map for now. A dedicated data structure will likely be more + // efficient. There are separate maps for negative and positive + // observations. 
The map's value is an *int64, counting observations in + // that bucket. (Note that we don't use uint64 as an int64 won't + // overflow in practice, and working with signed numbers from the + // beginning simplifies the handling of deltas.) The map's key is the + // index of the bucket according to the used + // nativeHistogramSchema. Index 0 is for an upper bound of 1. + nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map } // observe manages the parts of observe that only affects -// histogramCounts. doSparse is true if spare buckets should be done, +// histogramCounts. doSparse is true if sparse buckets should be done, // too. func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { if bucket < len(hc.buckets) { @@ -600,13 +609,13 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { atomicAddFloat(&hc.sumBits, v) if doSparse && !math.IsNaN(v) { var ( - sparseKey int - sparseSchema = atomic.LoadInt32(&hc.sparseSchema) - sparseZeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.sparseZeroThresholdBits)) + key int + schema = atomic.LoadInt32(&hc.nativeHistogramSchema) + zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) bucketCreated, isInf bool ) if math.IsInf(v, 0) { - // Pretend v is MaxFloat64 but later increment sparseKey by one. + // Pretend v is MaxFloat64 but later increment key by one. if math.IsInf(v, +1) { v = math.MaxFloat64 } else { @@ -615,30 +624,30 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { isInf = true } frac, exp := math.Frexp(math.Abs(v)) - if sparseSchema > 0 { - bounds := sparseBounds[sparseSchema] - sparseKey = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) + if schema > 0 { + bounds := nativeHistogramBounds[schema] + key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) } else { - sparseKey = exp + key = exp if frac == 0.5 { - sparseKey-- + key-- } - div := 1 << -sparseSchema - sparseKey = (sparseKey + div - 1) / div + div := 1 << -schema + key = (key + div - 1) / div } if isInf { - sparseKey++ + key++ } switch { - case v > sparseZeroThreshold: - bucketCreated = addToSparseBucket(&hc.sparseBucketsPositive, sparseKey, 1) - case v < -sparseZeroThreshold: - bucketCreated = addToSparseBucket(&hc.sparseBucketsNegative, sparseKey, 1) + case v > zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) + case v < -zeroThreshold: + bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) default: - atomic.AddUint64(&hc.sparseZeroBucket, 1) + atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) } if bucketCreated { - atomic.AddUint32(&hc.sparseBucketsNumber, 1) + atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) } } // Increment count last as we take it as a signal that the observation @@ -677,15 +686,15 @@ type histogram struct { // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - sparseSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. - sparseZeroThreshold float64 // The initial zero threshold. - sparseMaxZeroThreshold float64 - sparseMaxBuckets uint32 - sparseMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. 
+ upperBounds []float64 + labelPairs []*dto.LabelPair + exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. + nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. + nativeHistogramZeroThreshold float64 // The initial zero threshold. + nativeHistogramMaxZeroThreshold float64 + nativeHistogramMaxBuckets uint32 + nativeHistogramMinResetDuration time.Duration + lastResetTime time.Time // Protected by mtx. now func() time.Time // To mock out time.Now() for testing. } @@ -753,19 +762,19 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } - if h.sparseSchema > math.MinInt32 { - his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sparseZeroThresholdBits))) - his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.sparseSchema)) - zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket) + if h.nativeHistogramSchema > math.MinInt32 { + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) + zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) defer func() { - coldCounts.sparseBucketsPositive.Range(addAndReset(&hotCounts.sparseBucketsPositive, &hotCounts.sparseBucketsNumber)) - coldCounts.sparseBucketsNegative.Range(addAndReset(&hotCounts.sparseBucketsNegative, &hotCounts.sparseBucketsNumber)) + coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) + coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) }() his.ZeroCount = proto.Uint64(zeroBucket) - his.NegativeSpan, his.NegativeDelta = makeSparseBuckets(&coldCounts.sparseBucketsNegative) - his.PositiveSpan, his.PositiveDelta = makeSparseBuckets(&coldCounts.sparseBucketsPositive) + his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) } addAndResetCounts(hotCounts, coldCounts) return nil @@ -789,7 +798,7 @@ func (h *histogram) findBucket(v float64) int { // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { // Do not add to sparse buckets for NaN observations. - doSparse := h.sparseSchema > math.MinInt32 && !math.IsNaN(v) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. @@ -797,7 +806,7 @@ func (h *histogram) observe(v float64, bucket int) { hotCounts := h.counts[n>>63] hotCounts.observe(v, bucket, doSparse) if doSparse { - h.limitSparseBuckets(hotCounts, v, bucket) + h.limitBuckets(hotCounts, v, bucket) } } @@ -806,11 +815,11 @@ func (h *histogram) observe(v float64, bucket int) { // number can go higher (if even the lowest resolution isn't enough to reduce // the number sufficiently, or if the provided counts aren't fully updated yet // by a concurrently happening Write call). 
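Before the bucket-limitation code below, the sparse-key arithmetic used in observe above can be reproduced in isolation. A minimal standalone sketch, assuming schema 1 and its two fractional bounds (0.5 and sqrt(2)/2); the function name, sample values, and output format are illustrative only:

    package main

    import (
        "fmt"
        "math"
        "sort"
    )

    // keyFor mirrors the schema>0 branch in observe: the bucket with key k
    // covers the range (2^((k-1)/2^schema), 2^(k/2^schema)].
    func keyFor(v float64, bounds []float64) int {
        frac, exp := math.Frexp(math.Abs(v))
        return sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
    }

    func main() {
        // Fractional bounds for schema 1 (two buckets per power of two).
        bounds := []float64{0.5, 0.7071067811865475}
        for _, v := range []float64{0.9, 1, 1.3, 1.5, 2, 3} {
            k := keyFor(v, bounds)
            fmt.Printf("v=%.2f -> key %d, upper bound 2^(%d/2) = %.4f\n",
                v, k, k, math.Exp2(float64(k)/2))
        }
    }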
-func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, bucket int) {
-	if h.sparseMaxBuckets == 0 {
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
 		return // No limit configured.
 	}
-	if h.sparseMaxBuckets >= atomic.LoadUint32(&counts.sparseBucketsNumber) {
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
 		return // Bucket limit not exceeded yet.
 	}
@@ -825,7 +834,7 @@ func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, b
 	hotCounts := h.counts[hotIdx]
 	coldCounts := h.counts[coldIdx]
 	// ...and then check again if we really have to reduce the bucket count.
-	if h.sparseMaxBuckets >= atomic.LoadUint32(&hotCounts.sparseBucketsNumber) {
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
 		return // Bucket limit not exceeded after all.
 	}
 	// Try the various strategies in order.
@@ -838,13 +847,13 @@ func (h *histogram) limitSparseBuckets(counts *histogramCounts, value float64, b
 	h.doubleBucketWidth(hotCounts, coldCounts)
 }
 
-// maybeReset resests the whole histogram if at least h.sparseMinResetDuration
+// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
 // has been passed. It returns true if the histogram has been reset. The caller
 // must have locked h.mtx.
 func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
 	// We are using the possibly mocked h.now() rather than
 	// time.Since(h.lastResetTime) to enable testing.
-	if h.sparseMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.sparseMinResetDuration {
+	if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
 		return false
 	}
 	// Completely reset coldCounts.
@@ -864,34 +873,35 @@ func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value
 // maybeWidenZeroBucket widens the zero bucket until it includes the existing
 // buckets closest to the zero bucket (which could be two, if an equidistant
 // negative and a positive bucket exists, but usually it's only one bucket to be
-// merged into the new wider zero bucket). h.sparseMaxZeroThreshold limits how
-// far the zero bucket can be extended, and if that's not enough to include an
-// existing bucket, the method returns false. The caller must have locked h.mtx.
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
 func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
-	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.sparseZeroThresholdBits))
-	if currentZeroThreshold >= h.sparseMaxZeroThreshold {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
 		return false
 	}
 	// Find the key of the bucket closest to zero.
- smallestKey := findSmallestKey(&hot.sparseBucketsPositive) - smallestNegativeKey := findSmallestKey(&hot.sparseBucketsNegative) + smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) if smallestNegativeKey < smallestKey { smallestKey = smallestNegativeKey } if smallestKey == math.MaxInt32 { return false } - newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.sparseSchema)) - if newZeroThreshold > h.sparseMaxZeroThreshold { + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) + if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { return false // New threshold would exceed the max threshold. } - atomic.StoreUint64(&cold.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) // Remove applicable buckets. - if _, loaded := cold.sparseBucketsNegative.LoadAndDelete(smallestKey); loaded { - atomicDecUint32(&cold.sparseBucketsNumber) + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) } - if _, loaded := cold.sparseBucketsPositive.LoadAndDelete(smallestKey); loaded { - atomicDecUint32(&cold.sparseBucketsNumber) + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) } // Make cold counts the new hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) @@ -903,7 +913,7 @@ func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { // Add all the now cold counts to the new hot counts... addAndResetCounts(hot, cold) // ...adjust the new zero threshold in the cold counts, too... - atomic.StoreUint64(&cold.sparseZeroThresholdBits, math.Float64bits(newZeroThreshold)) + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) // ...and then merge the newly deleted buckets into the wider zero // bucket. mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { @@ -912,14 +922,14 @@ func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { bucket := v.(*int64) if key == smallestKey { // Merge into hot zero bucket... - atomic.AddUint64(&hot.sparseZeroBucket, uint64(atomic.LoadInt64(bucket))) + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) // ...and delete from cold counts. coldBuckets.Delete(key) - atomicDecUint32(&cold.sparseBucketsNumber) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) } else { // Add to corresponding hot bucket... - if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.sparseBucketsNumber, 1) + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) } // ...and reset cold bucket. 
atomic.StoreInt64(bucket, 0) @@ -928,8 +938,8 @@ func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { } } - cold.sparseBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.sparseBucketsPositive, &cold.sparseBucketsPositive)) - cold.sparseBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.sparseBucketsNegative, &cold.sparseBucketsNegative)) + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) return true } @@ -938,16 +948,16 @@ func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { // bucket count (or even no reduction at all). The method does nothing if the // schema is already -4. func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { - coldSchema := atomic.LoadInt32(&cold.sparseSchema) + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) if coldSchema == -4 { return // Already at lowest resolution. } coldSchema-- - atomic.StoreInt32(&cold.sparseSchema, coldSchema) + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) // Play it simple and just delete all cold buckets. - atomic.StoreUint32(&cold.sparseBucketsNumber, 0) - deleteSyncMap(&cold.sparseBucketsNegative) - deleteSyncMap(&cold.sparseBucketsPositive) + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) // Make coldCounts the new hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) count := n & ((1 << 63) - 1) @@ -958,7 +968,7 @@ func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { // Add all the now cold counts to the new hot counts... addAndResetCounts(hot, cold) // ...adjust the schema in the cold counts, too... - atomic.StoreInt32(&cold.sparseSchema, coldSchema) + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) // ...and then merge the cold buckets into the wider hot buckets. merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { return func(k, v interface{}) bool { @@ -970,33 +980,33 @@ func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { } key /= 2 // Add to corresponding hot bucket. - if addToSparseBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.sparseBucketsNumber, 1) + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) } return true } } - cold.sparseBucketsPositive.Range(merge(&hot.sparseBucketsPositive)) - cold.sparseBucketsNegative.Range(merge(&hot.sparseBucketsNegative)) + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) // Play it simple again and just delete all cold buckets. 
- atomic.StoreUint32(&cold.sparseBucketsNumber, 0) - deleteSyncMap(&cold.sparseBucketsNegative) - deleteSyncMap(&cold.sparseBucketsPositive) + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) } func (h *histogram) resetCounts(counts *histogramCounts) { atomic.StoreUint64(&counts.sumBits, 0) atomic.StoreUint64(&counts.count, 0) - atomic.StoreUint64(&counts.sparseZeroBucket, 0) - atomic.StoreUint64(&counts.sparseZeroThresholdBits, math.Float64bits(h.sparseZeroThreshold)) - atomic.StoreInt32(&counts.sparseSchema, h.sparseSchema) - atomic.StoreUint32(&counts.sparseBucketsNumber, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) for i := range h.upperBounds { atomic.StoreUint64(&counts.buckets[i], 0) } - deleteSyncMap(&counts.sparseBucketsNegative) - deleteSyncMap(&counts.sparseBucketsPositive) + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } // updateExemplar replaces the exemplar for the provided bucket. With empty @@ -1247,13 +1257,13 @@ func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } -// pickSparseschema returns the largest number n between -4 and 8 such that +// pickSchema returns the largest number n between -4 and 8 such that // 2^(2^-n) is less or equal the provided bucketFactor. // // Special cases: // - bucketFactor <= 1: panics. // - bucketFactor < 2^(2^-8) (but > 1): still returns 8. -func pickSparseSchema(bucketFactor float64) int32 { +func pickSchema(bucketFactor float64) int32 { if bucketFactor <= 1 { panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) } @@ -1268,7 +1278,7 @@ func pickSparseSchema(bucketFactor float64) int32 { } } -func makeSparseBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { +func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { var ii []int buckets.Range(func(k, v interface{}) bool { ii = append(ii, k.(int)) @@ -1323,9 +1333,9 @@ func makeSparseBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { return spans, deltas } -// addToSparseBucket increments the sparse bucket at key by the provided -// amount. It returns true if a new sparse bucket had to be created for that. -func addToSparseBucket(buckets *sync.Map, key int, increment int64) bool { +// addToBucket increments the sparse bucket at key by the provided amount. It +// returns true if a new sparse bucket had to be created for that. +func addToBucket(buckets *sync.Map, key int, increment int64) bool { if existingBucket, ok := buckets.Load(key); ok { // Fast path without allocation. 
atomic.AddInt64(existingBucket.(*int64), increment) @@ -1350,7 +1360,7 @@ func addToSparseBucket(buckets *sync.Map, key int, increment int64) bool { func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { return func(k, v interface{}) bool { bucket := v.(*int64) - if addToSparseBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { atomic.AddUint32(bucketNumber, 1) } atomic.StoreInt64(bucket, 0) @@ -1420,7 +1430,7 @@ func getLe(key int, schema int32) float64 { } fracIdx := key & ((1 << schema) - 1) - frac := sparseBounds[schema][fracIdx] + frac := nativeHistogramBounds[schema][fracIdx] exp := (key >> schema) + 1 if frac == 0.5 && exp == 1025 { // This is the last bucket before the overflow bucket (for ±Inf @@ -1456,9 +1466,9 @@ func atomicDecUint32(p *uint32) { atomic.AddUint32(p, ^uint32(0)) } -// addAndResetCounts adds certain fields (count, sum, conventional buckets, -// sparse zero bucket) from the cold counts to the corresponding fields in the -// hot counts. Those fields are then reset to 0 in the cold counts. +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) atomic.StoreUint64(&cold.count, 0) @@ -1469,6 +1479,6 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) atomic.StoreUint64(&cold.buckets[i], 0) } - atomic.AddUint64(&hot.sparseZeroBucket, atomic.LoadUint64(&cold.sparseZeroBucket)) - atomic.StoreUint64(&cold.sparseZeroBucket, 0) + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } diff --git a/prometheus/histogram_test.go b/prometheus/histogram_test.go index fa80249..80a3189 100644 --- a/prometheus/histogram_test.go +++ b/prometheus/histogram_test.go @@ -656,13 +656,13 @@ func TestSparseHistogram(t *testing.T) { for _, s := range scenarios { t.Run(s.name, func(t *testing.T) { his := NewHistogram(HistogramOpts{ - Name: "name", - Help: "help", - SparseBucketsFactor: s.factor, - SparseBucketsZeroThreshold: s.zeroThreshold, - SparseBucketsMaxNumber: s.maxBuckets, - SparseBucketsMinResetDuration: s.minResetDuration, - SparseBucketsMaxZeroThreshold: s.maxZeroThreshold, + Name: "name", + Help: "help", + NativeHistogramBucketFactor: s.factor, + NativeHistogramZeroThreshold: s.zeroThreshold, + NativeHistogramMaxBucketNumber: s.maxBuckets, + NativeHistogramMinResetDuration: s.minResetDuration, + NativeHistogramMaxZeroThreshold: s.maxZeroThreshold, }) ts := time.Now().Add(30 * time.Second) now := func() time.Time { @@ -702,13 +702,13 @@ func TestSparseHistogramConcurrency(t *testing.T) { end.Add(concLevel) his := NewHistogram(HistogramOpts{ - Name: "test_sparse_histogram", - Help: "This help is sparse.", - SparseBucketsFactor: 1.05, - SparseBucketsZeroThreshold: 0.0000001, - SparseBucketsMaxNumber: 50, - SparseBucketsMinResetDuration: time.Hour, // Comment out to test for totals below. 
- SparseBucketsMaxZeroThreshold: 0.001, + Name: "test_sparse_histogram", + Help: "This help is sparse.", + NativeHistogramBucketFactor: 1.05, + NativeHistogramZeroThreshold: 0.0000001, + NativeHistogramMaxBucketNumber: 50, + NativeHistogramMinResetDuration: time.Hour, // Comment out to test for totals below. + NativeHistogramMaxZeroThreshold: 0.001, }) ts := time.Now().Add(30 * time.Second).Unix()
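Putting the renamed options together, a hedged end-to-end usage sketch (the metric name, bucket layout, observed value, and registration call are illustrative; only the HistogramOpts field names and their documented behavior are taken from this patch series):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // Hypothetical latency histogram: conventional buckets plus sparse
        // buckets for the experimental native histogram representation.
        latency := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:    "example_request_duration_seconds",
            Help:    "Example request latencies.",
            Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
            // Each sparse bucket at most 10% wider than the previous one.
            NativeHistogramBucketFactor: 1.1,
            // Cap memory use: at most 100 sparse buckets. Once exceeded,
            // reset if the last reset was over an hour ago, otherwise widen
            // the zero bucket up to 0.001 and, as a last resort, halve the
            // resolution.
            NativeHistogramMaxBucketNumber:  100,
            NativeHistogramMinResetDuration: time.Hour,
            NativeHistogramMaxZeroThreshold: 0.001,
        })
        prometheus.MustRegister(latency)
        latency.Observe(0.042)
    }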