Port histogram improvements into noObjectivesSummary

Signed-off-by: beorn7 <beorn@soundcloud.com>
beorn7 2019-02-11 19:10:17 +01:00
parent 295f7e4861
commit 4c99dd6630
1 changed file with 38 additions and 64 deletions


@@ -405,18 +405,21 @@ type summaryCounts struct {
 }
 
 type noObjectivesSummary struct {
-	// countAndHotIdx is a complicated one. For lock-free yet atomic
-	// observations, we need to save the total count of observations again,
-	// combined with the index of the currently-hot counts struct, so that
-	// we can perform the operation on both values atomically. The least
-	// significant bit defines the hot counts struct. The remaining 63 bits
-	// represent the total count of observations. This happens under the
-	// assumption that the 63bit count will never overflow. Rationale: An
-	// observations takes about 30ns. Let's assume it could happen in
-	// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
-	// which is about 3000 years.
-	//
-	// This has to be first in the struct for 64bit alignment. See
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot–cold under the writeMtx
+	// lock. A cooldown is awaited (while locked) by comparing the number of
+	// observations with the initiation count. Once they match, then the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+	//
+	// Fields with atomic access first! See alignment constraint:
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
 	countAndHotIdx uint64
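
The bit layout described in the new comment can be shown in isolation. The following standalone sketch is not part of the commit; the helper names hotIndex and obsCount are made up for illustration, and it assumes only what the comment states: the most significant bit of countAndHotIdx selects the hot counts slot, and the lower 63 bits count the Observe calls started so far.

package main

import "fmt"

// Hypothetical helpers, not from the library: the most significant bit
// selects the hot counts slot, the lower 63 bits hold the number of
// Observe calls started so far.
func hotIndex(n uint64) uint64 { return n >> 63 }
func obsCount(n uint64) uint64 { return n & ((1 << 63) - 1) }

func main() {
	var countAndHotIdx uint64

	countAndHotIdx++          // what an Observe does: count +1, hot index untouched
	countAndHotIdx += 1 << 63 // what a Write does: hot index flips, count untouched

	fmt.Println(hotIndex(countAndHotIdx), obsCount(countAndHotIdx)) // prints: 1 1
}

With this packing, a single atomic increment both registers an observation and tells the caller which slot is currently hot.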
@@ -429,7 +432,6 @@ type noObjectivesSummary struct {
 	// pointers to guarantee 64bit alignment of the histogramCounts, see
 	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 	counts [2]*summaryCounts
-	hotIdx int // Index of currently-hot counts. Only used within Write.
 
 	labelPairs []*dto.LabelPair
 }
@@ -439,11 +441,11 @@ func (s *noObjectivesSummary) Desc() *Desc {
 }
 
 func (s *noObjectivesSummary) Observe(v float64) {
-	// We increment s.countAndHotIdx by 2 so that the counter in the upper
-	// 63 bits gets incremented by 1. At the same time, we get the new value
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
 	// back, which we can use to find the currently-hot counts.
-	n := atomic.AddUint64(&s.countAndHotIdx, 2)
-	hotCounts := s.counts[n%2]
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
 
 	for {
 		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
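
The context lines above show the start of the loop in Observe that updates the sum, which is a float64 stored as a uint64 bit pattern (sumBits, also read via math.Float64frombits in Write below). The rest of that loop is unchanged context not shown in the hunk; presumably it retries with a compare-and-swap. That pattern can be sketched in isolation; addFloat64 is a hypothetical helper for illustration, not the library's code:

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// addFloat64 atomically adds v to the float64 whose bit pattern is stored in
// *bits, retrying with compare-and-swap until no concurrent writer interferes.
func addFloat64(bits *uint64, v float64) {
	for {
		oldBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64
	addFloat64(&sumBits, 1.5)
	addFloat64(&sumBits, 2.25)
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // prints: 3.75
}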
@@ -458,61 +460,33 @@ func (s *noObjectivesSummary) Observe(v float64) {
 }
 
 func (s *noObjectivesSummary) Write(out *dto.Metric) error {
-	var (
-		sum                   = &dto.Summary{}
-		hotCounts, coldCounts *summaryCounts
-		count                 uint64
-	)
-
-	// For simplicity, we mutex the rest of this method. It is not in the
-	// hot path, i.e. Observe is called much more often than Write. The
-	// complication of making Write lock-free isn't worth it.
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
 	s.writeMtx.Lock()
 	defer s.writeMtx.Unlock()
 
-	// This is a bit arcane, which is why the following spells out this if
-	// clause in English:
-	//
-	// If the currently-hot counts struct is #0, we atomically increment
-	// s.countAndHotIdx by 1 so that from now on Observe will use the counts
-	// struct #1. Furthermore, the atomic increment gives us the new value,
-	// which, in its most significant 63 bits, tells us the count of
-	// observations done so far up to and including currently ongoing
-	// observations still using the counts struct just changed from hot to
-	// cold. To have a normal uint64 for the count, we bitshift by 1 and
-	// save the result in count. We also set s.hotIdx to 1 for the next
-	// Write call, and we will refer to counts #1 as hotCounts and to counts
-	// #0 as coldCounts.
-	//
-	// If the currently-hot counts struct is #1, we do the corresponding
-	// things the other way round. We have to _decrement_ s.countAndHotIdx
-	// (which is a bit arcane in itself, as we have to express -1 with an
-	// unsigned int...).
-	if s.hotIdx == 0 {
-		count = atomic.AddUint64(&s.countAndHotIdx, 1) >> 1
-		s.hotIdx = 1
-		hotCounts = s.counts[1]
-		coldCounts = s.counts[0]
-	} else {
-		count = atomic.AddUint64(&s.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
-		s.hotIdx = 0
-		hotCounts = s.counts[0]
-		coldCounts = s.counts[1]
-	}
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
 
-	// Now we have to wait for the now-declared-cold counts to actually cool
-	// down, i.e. wait for all observations still using it to finish. That's
-	// the case once the count in the cold counts struct is the same as the
-	// one atomically retrieved from the upper 63bits of s.countAndHotIdx.
-	for {
-		if count == atomic.LoadUint64(&coldCounts.count) {
-			break
-		}
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
 		runtime.Gosched() // Let observations get work done.
 	}
 
-	sum.SampleCount = proto.Uint64(count)
-	sum.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
 
 	out.Summary = sum
 	out.Label = s.labelPairs
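
Taken together, the new Write path flips the hot index, reads the number of started observations from the same atomic value, waits until the cold slot's completion count catches up, and then, as the struct comment requires, merges the cold fields into the new hot ones before releasing writeMtx. The following self-contained sketch is not the library's code: the names slotCounts, summarySketch, observe, and snapshotCount are invented, and it tracks only the count, not the sum or quantiles. It is meant as a minimal, runnable illustration of the swap-and-cooldown protocol described in the comments above.

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

// slotCounts stands in for summaryCounts; count marks completed observations.
type slotCounts struct {
	count uint64
}

// summarySketch is a stripped-down, hypothetical stand-in for
// noObjectivesSummary that only tracks the observation count.
type summarySketch struct {
	countAndHotIdx uint64 // MSB: hot slot index; lower 63 bits: started observations.
	writeMtx       sync.Mutex
	counts         [2]*slotCounts
}

func (s *summarySketch) observe() {
	n := atomic.AddUint64(&s.countAndHotIdx, 1) // start marker, also selects the hot slot
	hot := s.counts[n>>63]
	atomic.AddUint64(&hot.count, 1) // completion marker
}

func (s *summarySketch) snapshotCount() uint64 {
	s.writeMtx.Lock()
	defer s.writeMtx.Unlock()

	// Adding 1<<63 flips the hot index without touching the count bits.
	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
	count := n & ((1 << 63) - 1)
	hot := s.counts[n>>63]
	cold := s.counts[(^n)>>63]

	// Await cooldown: every observation started on the now-cold slot has
	// also finished once its completion count catches up with count.
	for count != atomic.LoadUint64(&cold.count) {
		runtime.Gosched()
	}

	// Merge the cold slot into the new hot one and reset it, so the next
	// snapshot starts from a clean cold slot.
	atomic.AddUint64(&hot.count, count)
	atomic.StoreUint64(&cold.count, 0)
	return count
}

func main() {
	s := &summarySketch{counts: [2]*slotCounts{{}, {}}}
	for i := 0; i < 1000; i++ {
		s.observe()
	}
	fmt.Println(s.snapshotCount()) // prints: 1000
}

In this sketch, adding the full running count into the new hot slot during the merge is what keeps the cooldown comparison valid across repeated snapshots.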