Add: exponential backoff for CAS operations on floats (#1661)
* add: exponential backoff for CAS operations on floats Signed-off-by: Ivan Goncharov <i.morph@gmail.com> * add: some more benchmark use cases (higher contention) Signed-off-by: Ivan Goncharov <i.morph@gmail.com> * fmt: fumpted some files Signed-off-by: Ivan Goncharov <i.morph@gmail.com> * add: license header Signed-off-by: Ivan Goncharov <i.morph@gmail.com> * add: comment explaining origin of backoff constants Signed-off-by: Ivan Goncharov <i.morph@gmail.com> --------- Signed-off-by: Ivan Goncharov <i.morph@gmail.com>
This commit is contained in:
parent
bab92a7743
commit
a934c35951
|
@ -0,0 +1,50 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// atomicUpdateFloat atomically updates the float64 value pointed to by bits
|
||||||
|
// using the provided updateFunc, with an exponential backoff on contention.
|
||||||
|
func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) {
|
||||||
|
const (
|
||||||
|
// both numbers are derived from empirical observations
|
||||||
|
// documented in this PR: https://github.com/prometheus/client_golang/pull/1661
|
||||||
|
maxBackoff = 320 * time.Millisecond
|
||||||
|
initialBackoff = 10 * time.Millisecond
|
||||||
|
)
|
||||||
|
backoff := initialBackoff
|
||||||
|
|
||||||
|
for {
|
||||||
|
loadedBits := atomic.LoadUint64(bits)
|
||||||
|
oldFloat := math.Float64frombits(loadedBits)
|
||||||
|
newFloat := updateFunc(oldFloat)
|
||||||
|
newBits := math.Float64bits(newFloat)
|
||||||
|
|
||||||
|
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
// Exponential backoff with sleep and cap to avoid infinite wait
|
||||||
|
time.Sleep(backoff)
|
||||||
|
backoff *= 2
|
||||||
|
if backoff > maxBackoff {
|
||||||
|
backoff = maxBackoff
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,167 @@
|
||||||
|
// Copyright 2014 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"testing"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// output is a package-level sink for benchmark results; assigning the final
// value to it keeps the compiler from optimizing the benchmarked work away.
var output float64
||||||
|
|
||||||
|
func TestAtomicUpdateFloat(t *testing.T) {
|
||||||
|
var val float64 = 0.0
|
||||||
|
bits := (*uint64)(unsafe.Pointer(&val))
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
numGoroutines := 100000
|
||||||
|
increment := 1.0
|
||||||
|
|
||||||
|
for i := 0; i < numGoroutines; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
atomicUpdateFloat(bits, func(f float64) float64 {
|
||||||
|
return f + increment
|
||||||
|
})
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
expected := float64(numGoroutines) * increment
|
||||||
|
if val != expected {
|
||||||
|
t.Errorf("Expected %f, got %f", expected, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark for atomicUpdateFloat with single goroutine (no contention).
|
||||||
|
func BenchmarkAtomicUpdateFloat_SingleGoroutine(b *testing.B) {
|
||||||
|
var val float64 = 0.0
|
||||||
|
bits := (*uint64)(unsafe.Pointer(&val))
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
atomicUpdateFloat(bits, func(f float64) float64 {
|
||||||
|
return f + 1.0
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
output = val
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark for old implementation with single goroutine (no contention) -> to check overhead of backoff
|
||||||
|
func BenchmarkAtomicNoBackoff_SingleGoroutine(b *testing.B) {
|
||||||
|
var val float64 = 0.0
|
||||||
|
bits := (*uint64)(unsafe.Pointer(&val))
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
for {
|
||||||
|
loadedBits := atomic.LoadUint64(bits)
|
||||||
|
newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0)
|
||||||
|
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
output = val
|
||||||
|
}
|
||||||
|
|
||||||
|
// Benchmark varying the number of goroutines.
|
||||||
|
func benchmarkAtomicUpdateFloatConcurrency(b *testing.B, numGoroutines int) {
|
||||||
|
var val float64 = 0.0
|
||||||
|
bits := (*uint64)(unsafe.Pointer(&val))
|
||||||
|
b.SetParallelism(numGoroutines)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
|
for pb.Next() {
|
||||||
|
atomicUpdateFloat(bits, func(f float64) float64 {
|
||||||
|
return f + 1.0
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
output = val
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkAtomicNoBackoffFloatConcurrency(b *testing.B, numGoroutines int) {
|
||||||
|
var val float64 = 0.0
|
||||||
|
bits := (*uint64)(unsafe.Pointer(&val))
|
||||||
|
b.SetParallelism(numGoroutines)
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
|
for pb.Next() {
|
||||||
|
for {
|
||||||
|
loadedBits := atomic.LoadUint64(bits)
|
||||||
|
newBits := math.Float64bits(math.Float64frombits(loadedBits) + 1.0)
|
||||||
|
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
output = val
|
||||||
|
}
|
||||||
|
|
||||||
|
// The wrappers below compare the backoff-based atomicUpdateFloat against the
// plain no-backoff CAS loop at increasing parallelism levels (1, 2, 4, 8, 16,
// and 32 goroutines), so the contention crossover point can be read from the
// paired benchmark results.

func BenchmarkAtomicUpdateFloat_1Goroutine(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 1)
}

func BenchmarkAtomicNoBackoff_1Goroutine(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 1)
}

func BenchmarkAtomicUpdateFloat_2Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 2)
}

func BenchmarkAtomicNoBackoff_2Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 2)
}

func BenchmarkAtomicUpdateFloat_4Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 4)
}

func BenchmarkAtomicNoBackoff_4Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 4)
}

func BenchmarkAtomicUpdateFloat_8Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 8)
}

func BenchmarkAtomicNoBackoff_8Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 8)
}

func BenchmarkAtomicUpdateFloat_16Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 16)
}

func BenchmarkAtomicNoBackoff_16Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 16)
}

func BenchmarkAtomicUpdateFloat_32Goroutines(b *testing.B) {
	benchmarkAtomicUpdateFloatConcurrency(b, 32)
}

func BenchmarkAtomicNoBackoff_32Goroutines(b *testing.B) {
	benchmarkAtomicNoBackoffFloatConcurrency(b, 32)
}
|
|
@ -134,13 +134,9 @@ func (c *counter) Add(v float64) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for {
|
atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 {
|
||||||
oldBits := atomic.LoadUint64(&c.valBits)
|
return oldVal + v
|
||||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
})
|
||||||
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *counter) AddWithExemplar(v float64, e Labels) {
|
func (c *counter) AddWithExemplar(v float64, e Labels) {
|
||||||
|
|
|
@ -120,13 +120,9 @@ func (g *gauge) Dec() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *gauge) Add(val float64) {
|
func (g *gauge) Add(val float64) {
|
||||||
for {
|
atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 {
|
||||||
oldBits := atomic.LoadUint64(&g.valBits)
|
return oldVal + val
|
||||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
|
})
|
||||||
if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *gauge) Sub(val float64) {
|
func (g *gauge) Sub(val float64) {
|
||||||
|
|
|
@ -1641,13 +1641,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
|
||||||
// atomicAddFloat adds the provided float atomically to another float
|
// atomicAddFloat adds the provided float atomically to another float
|
||||||
// represented by the bit pattern the bits pointer is pointing to.
|
// represented by the bit pattern the bits pointer is pointing to.
|
||||||
func atomicAddFloat(bits *uint64, v float64) {
|
func atomicAddFloat(bits *uint64, v float64) {
|
||||||
for {
|
atomicUpdateFloat(bits, func(oldVal float64) float64 {
|
||||||
loadedBits := atomic.LoadUint64(bits)
|
return oldVal + v
|
||||||
newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
|
})
|
||||||
if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// atomicDecUint32 atomically decrements the uint32 p points to. See
|
// atomicDecUint32 atomically decrements the uint32 p points to. See
|
||||||
|
|
|
@ -471,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
|
||||||
n := atomic.AddUint64(&s.countAndHotIdx, 1)
|
n := atomic.AddUint64(&s.countAndHotIdx, 1)
|
||||||
hotCounts := s.counts[n>>63]
|
hotCounts := s.counts[n>>63]
|
||||||
|
|
||||||
for {
|
atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
|
||||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
return oldVal + v
|
||||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
})
|
||||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Increment count last as we take it as a signal that the observation
|
// Increment count last as we take it as a signal that the observation
|
||||||
// is complete.
|
// is complete.
|
||||||
atomic.AddUint64(&hotCounts.count, 1)
|
atomic.AddUint64(&hotCounts.count, 1)
|
||||||
|
@ -519,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
|
||||||
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
||||||
atomic.AddUint64(&hotCounts.count, count)
|
atomic.AddUint64(&hotCounts.count, count)
|
||||||
atomic.StoreUint64(&coldCounts.count, 0)
|
atomic.StoreUint64(&coldCounts.count, 0)
|
||||||
for {
|
|
||||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
// Use atomicUpdateFloat to update hotCounts.sumBits atomically.
|
||||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
|
atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
|
||||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
return oldVal + sum.GetSampleSum()
|
||||||
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
})
|
||||||
break
|
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue