Allow a zero threshold of zero

Signed-off-by: beorn7 <beorn@grafana.com>

parent aa6f67a9e6
commit 9ef5f90a76

prometheus/examples_test.go
@@ -538,8 +538,6 @@ func ExampleHistogram() {
 	// cumulative_count: 816
 	// upper_bound: 40
 	// >
-	// sb_schema: 0
-	// sb_zero_threshold: 0
 	// >
 }
 

prometheus/histogram.go
@@ -369,8 +369,13 @@ type HistogramOpts struct {
 	// SparseBucketsZeroThreshold are accumulated into a “zero” bucket. For
 	// best results, this should be close to a bucket boundary. This is
 	// usually the case if picking a power of two. If
-	// SparseBucketsZeroThreshold is left at zero (or set to a negative
-	// value), DefSparseBucketsZeroThreshold is used as the threshold.
+	// SparseBucketsZeroThreshold is left at zero,
+	// DefSparseBucketsZeroThreshold is used as the threshold. If it is set
+	// to a negative value, a threshold of zero is used, i.e. only
+	// observations of precisely zero will go into the zero
+	// bucket. (TODO(beorn7): That's obviously weird and just a consequence
+	// of making the zero value of HistogramOpts meaningful. Has to be
+	// solved more elegantly in the final version.)
 	SparseBucketsZeroThreshold float64
 	// TODO(beorn7): Need a setting to limit total bucket count and to
 	// configure a strategy to enforce the limit, e.g. if minimum duration
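For illustration, the three configurations the new comment describes look like this in use. This is a sketch against this development branch: NewHistogram and the Name field are from the stable API, the Sparse* fields come from this branch, and the metric name and variable names are hypothetical.

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func main() {
		// Left at zero: DefSparseBucketsZeroThreshold is used.
		hDefault := prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:                "example_seconds",
			SparseBucketsFactor: 1.1,
		})

		// Positive: used as configured; a power of two sits close to a
		// bucket boundary, as the comment recommends.
		hPowerOfTwo := prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:                       "example_seconds",
			SparseBucketsFactor:        1.1,
			SparseBucketsZeroThreshold: 0.0078125, // 2^-7
		})

		// Negative: threshold of exactly zero, i.e. only observations of
		// precisely 0 land in the zero bucket (what this commit enables).
		hZeroOnly := prometheus.NewHistogram(prometheus.HistogramOpts{
			Name:                       "example_seconds",
			SparseBucketsFactor:        1.1,
			SparseBucketsZeroThreshold: -1,
		})

		_, _, _ = hDefault, hPowerOfTwo, hZeroOnly
	}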
@@ -415,7 +420,6 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
 	h := &histogram{
 		desc:            desc,
 		upperBounds:     opts.Buckets,
-		sparseThreshold: opts.SparseBucketsZeroThreshold,
 		labelPairs:      MakeLabelPairs(desc, labelValues),
 		counts:          [2]*histogramCounts{{}, {}},
 		now:             time.Now,
@@ -423,12 +427,15 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
 	if len(h.upperBounds) == 0 && opts.SparseBucketsFactor <= 1 {
 		h.upperBounds = DefBuckets
 	}
-	if h.sparseThreshold <= 0 {
-		h.sparseThreshold = DefSparseBucketsZeroThreshold
-	}
 	if opts.SparseBucketsFactor <= 1 {
-		h.sparseThreshold = 0 // To mark that there are no sparse buckets.
+		h.sparseSchema = math.MinInt32 // To mark that there are no sparse buckets.
 	} else {
+		switch {
+		case opts.SparseBucketsZeroThreshold > 0:
+			h.sparseThreshold = opts.SparseBucketsZeroThreshold
+		case opts.SparseBucketsZeroThreshold == 0:
+			h.sparseThreshold = DefSparseBucketsZeroThreshold
+		} // Leave h.sparseThreshold at 0 otherwise.
 		h.sparseSchema = pickSparseSchema(opts.SparseBucketsFactor)
 	}
 	for i, upperBound := range h.upperBounds {
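Read on its own, the decision above reduces to a small pure function. The following is a restatement for readability only: resolveSparse is a hypothetical name, assumed to live in the prometheus package with "math" imported, next to the DefSparseBucketsZeroThreshold and pickSparseSchema that the diff references.

	// resolveSparse restates newHistogram's logic: factor <= 1 disables
	// sparse buckets entirely (schema sentinel math.MinInt32); otherwise a
	// positive threshold is taken as configured, the zero value falls back
	// to the default, and a negative value yields a threshold of exactly 0.
	func resolveSparse(factor, zeroThreshold float64) (schema int32, threshold float64) {
		if factor <= 1 {
			return math.MinInt32, 0 // No sparse buckets.
		}
		switch {
		case zeroThreshold > 0:
			threshold = zeroThreshold
		case zeroThreshold == 0:
			threshold = DefSparseBucketsZeroThreshold
		}
		// A negative zeroThreshold leaves threshold at 0, so only
		// observations of precisely zero go into the zero bucket.
		return pickSparseSchema(factor), threshold
	}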
@@ -559,8 +566,8 @@ type histogram struct {
 	upperBounds []float64
 	labelPairs  []*dto.LabelPair
 	exemplars   []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
-	sparseSchema    int32
-	sparseThreshold float64 // This is zero iff no sparse buckets are used.
+	sparseSchema    int32   // Set to math.MinInt32 if no sparse buckets are used.
+	sparseThreshold float64
 
 	now func() time.Time // To mock out time.Now() for testing.
 }
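This field swap carries the main idea of the commit: sparseThreshold == 0 used to double as the "no sparse buckets" marker, which made a genuine threshold of zero unrepresentable. The sentinel now lives in sparseSchema, where math.MinInt32 cannot collide with any schema pickSparseSchema returns. Spelled out as a helper (hasSparseBuckets is a hypothetical name; the diff performs this check inline in Write and observe):

	func (h *histogram) hasSparseBuckets() bool {
		// math.MinInt32 marks "no sparse buckets"; any real schema is larger.
		return h.sparseSchema > math.MinInt32
	}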
@@ -607,8 +614,6 @@ func (h *histogram) Write(out *dto.Metric) error {
 		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
 		SampleCount: proto.Uint64(count),
 		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
-		SbSchema:        &h.sparseSchema,
-		SbZeroThreshold: &h.sparseThreshold,
 	}
 	out.Histogram = his
 	out.Label = h.labelPairs
@@ -648,7 +653,9 @@ func (h *histogram) Write(out *dto.Metric) error {
 		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
 		atomic.StoreUint64(&coldCounts.buckets[i], 0)
 	}
-	if h.sparseThreshold != 0 {
+	if h.sparseSchema > math.MinInt32 {
+		his.SbZeroThreshold = &h.sparseThreshold
+		his.SbSchema = &h.sparseSchema
 		zeroBucket := atomic.LoadUint64(&coldCounts.sparseZeroBucket)
 
 		defer func() {
@@ -749,7 +756,7 @@ func (h *histogram) findBucket(v float64) int {
 // observe is the implementation for Observe without the findBucket part.
 func (h *histogram) observe(v float64, bucket int) {
 	// Do not add to sparse buckets for NaN observations.
-	doSparse := h.sparseThreshold != 0 && !math.IsNaN(v)
+	doSparse := h.sparseSchema > math.MinInt32 && !math.IsNaN(v)
 	var whichSparse, sparseKey int
 	if doSparse {
 		switch {

prometheus/histogram_test.go
@@ -470,7 +470,7 @@ func TestSparseHistogram(t *testing.T) {
 			name:         "no sparse buckets",
 			observations: []float64{1, 2, 3},
 			factor:       1,
-			want:         `sample_count:3 sample_sum:6 bucket:<cumulative_count:0 upper_bound:0.005 > bucket:<cumulative_count:0 upper_bound:0.01 > bucket:<cumulative_count:0 upper_bound:0.025 > bucket:<cumulative_count:0 upper_bound:0.05 > bucket:<cumulative_count:0 upper_bound:0.1 > bucket:<cumulative_count:0 upper_bound:0.25 > bucket:<cumulative_count:0 upper_bound:0.5 > bucket:<cumulative_count:1 upper_bound:1 > bucket:<cumulative_count:2 upper_bound:2.5 > bucket:<cumulative_count:3 upper_bound:5 > bucket:<cumulative_count:3 upper_bound:10 > sb_schema:0 sb_zero_threshold:0 `, // Has conventional buckets because there are no sparse buckets.
+			want:         `sample_count:3 sample_sum:6 bucket:<cumulative_count:0 upper_bound:0.005 > bucket:<cumulative_count:0 upper_bound:0.01 > bucket:<cumulative_count:0 upper_bound:0.025 > bucket:<cumulative_count:0 upper_bound:0.05 > bucket:<cumulative_count:0 upper_bound:0.1 > bucket:<cumulative_count:0 upper_bound:0.25 > bucket:<cumulative_count:0 upper_bound:0.5 > bucket:<cumulative_count:1 upper_bound:1 > bucket:<cumulative_count:2 upper_bound:2.5 > bucket:<cumulative_count:3 upper_bound:5 > bucket:<cumulative_count:3 upper_bound:10 > `, // Has conventional buckets because there are no sparse buckets.
 		},
 		{
 			name: "factor 1.1 results in schema 3",