acquire/release segments only when needed

gobwas 2016-02-23 00:26:06 +03:00
parent a1783e9986
commit 71fc92e3a7
17 changed files with 132 additions and 213 deletions

View File

@ -26,7 +26,7 @@ func (self AnyOf) Match(s string) bool {
func (self AnyOf) Index(s string) (int, []int) {
index := -1
-segments := make([]int, 0, len(s))
+segments := acquireSegments(len(s))
for _, m := range self.Matchers {
idx, seg := m.Index(s)
@ -49,6 +49,7 @@ func (self AnyOf) Index(s string) (int, []int) {
}
if index == -1 {
+releaseSegments(segments)
return -1, nil
}
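
The hunk above shows the commit's core discipline: Index now borrows its segments slice from a pool instead of calling make, so any path that exits without handing the slice to the caller has to release it first. A minimal, self-contained sketch of that ownership rule, using one plain sync.Pool and a hypothetical indexAll matcher rather than the library's own types and signatures:

package main

import (
    "fmt"
    "strings"
    "sync"
)

// pool stands in for the library's bucketed segment pools.
var pool = sync.Pool{New: func() interface{} { return make([]int, 0, 16) }}

func acquire() []int  { return pool.Get().([]int)[:0] }
func release(s []int) { pool.Put(s) }

// indexAll reports the first occurrence of sub in s plus the end offset of
// every occurrence. It acquires a buffer up front, so the no-match branch
// must give that buffer back before returning nil.
func indexAll(s, sub string) (int, []int) {
    segments := acquire()
    first := strings.Index(s, sub)
    if first == -1 {
        release(segments) // early exit: return the buffer to the pool
        return -1, nil
    }
    for i := first; ; {
        segments = append(segments, i+len(sub))
        next := strings.Index(s[i+len(sub):], sub)
        if next == -1 {
            break
        }
        i += len(sub) + next
    }
    return first, segments // on success, ownership moves to the caller
}

func main() {
    idx, seg := indexAll("abcabc", "bc")
    fmt.Println(idx, seg) // 1 [3 6]
    release(seg)          // the caller releases once it is done with seg
}

Returning nil on the no-match path is safe here because the buffer has already been released; the PrefixSuffix hunk further down takes the other route and returns the pooled slice itself, leaving the release to the caller.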

View File

@ -50,7 +50,8 @@ func BenchmarkIndexAnyParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}
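
Every benchmark in this commit gets the same two-line treatment: the slice returned by Index is captured and passed to releaseSegments, so the measurement covers pool reuse instead of silently dropping pooled buffers. A standalone _test.go sketch of the before/after shape, with fakeIndex and its pool standing in for the library's matchers and helpers:

package segments

import (
    "sync"
    "testing"
)

var pool = sync.Pool{New: func() interface{} { return make([]int, 0, 16) }}

// fakeIndex mimics a matcher's Index: it borrows a buffer from the pool and
// returns it to the caller along with a match position.
func fakeIndex(s string) (int, []int) {
    seg := pool.Get().([]int)[:0]
    seg = append(seg, len(s))
    return 0, seg
}

// Old style: the result is dropped, the pool is never refilled, and every
// iteration pays for a fresh buffer.
func BenchmarkIndexDropResult(b *testing.B) {
    for i := 0; i < b.N; i++ {
        fakeIndex("some test pattern")
    }
}

// New style used throughout this commit: capture and release.
func BenchmarkIndexRelease(b *testing.B) {
    for i := 0; i < b.N; i++ {
        _, s := fakeIndex("some test pattern")
        pool.Put(s) // hand the buffer back so later iterations can reuse it
    }
}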

View File

@ -81,6 +81,7 @@ func (self BTree) Match(s string) bool {
// search for matching part in substring
index, segments := self.Value.Index(s[offset:limit])
if index == -1 {
+releaseSegments(segments)
return false
}
@ -112,6 +113,7 @@ func (self BTree) Match(s string) bool {
}
if right {
+releaseSegments(segments)
return true
}
}
@ -119,6 +121,8 @@ func (self BTree) Match(s string) bool {
_, step := utf8.DecodeRuneInString(s[offset+index:])
offset += index + step
+releaseSegments(segments)
}
return false
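
When an attempt fails, the loop above advances by index plus the byte width of the rune at the match position, which is exactly what the second return value of utf8.DecodeRuneInString provides; the segments buffer is released at the end of each pass so it never outlives a single iteration. A quick runnable illustration of that rune-width step:

package main

import (
    "fmt"
    "unicode/utf8"
)

func main() {
    s := "héllo, 世界" // mixes 1-, 2- and 3-byte runes
    for offset := 0; offset < len(s); {
        r, step := utf8.DecodeRuneInString(s[offset:])
        fmt.Printf("offset %2d: %q is %d byte(s) wide\n", offset, r, step)
        offset += step // the matcher advances by index + step in the same way
    }
}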

View File

@ -33,7 +33,7 @@ func (self Contains) Index(s string) (int, []int) {
s = s[:idx]
}
-segments := make([]int, 0, len(s)+1)
+segments := acquireSegments(len(s) + 1)
for i, _ := range s {
segments = append(segments, offset+i)
}

View File

@ -57,7 +57,8 @@ func BenchmarkIndexContains(b *testing.B) {
m := Contains{string(bench_separators), true}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -66,7 +67,8 @@ func BenchmarkIndexContainsParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -24,7 +24,7 @@ func (self Prefix) Index(s string) (int, []int) {
sub = ""
}
-segments := make([]int, 0, len(sub)+1)
+segments := acquireSegments(len(sub) + 1)
segments = append(segments, length)
for i, r := range sub {
segments = append(segments, length+i+utf8.RuneLen(r))

View File

@ -21,7 +21,7 @@ func (self PrefixSuffix) Index(s string) (int, []int) {
return prefixIdx, []int{len(s) - prefixIdx}
}
-segments := make([]int, 0, len(s)-prefixIdx)
+segments := acquireSegments(len(s) - prefixIdx)
for sub := s[prefixIdx:]; ; {
suffixIdx := strings.LastIndex(sub, self.Suffix)
if suffixIdx == -1 {
@ -33,7 +33,7 @@ func (self PrefixSuffix) Index(s string) (int, []int) {
}
if len(segments) == 0 {
-return -1, nil
+return -1, segments
}
reverseSegments(segments)

View File

@ -50,7 +50,8 @@ func BenchmarkIndexPrefixSuffix(b *testing.B) {
m := PrefixSuffix{"qew", "sqw"}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -59,7 +60,8 @@ func BenchmarkIndexPrefixSuffixParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -40,7 +40,8 @@ func BenchmarkIndexPrefix(b *testing.B) {
m := Prefix{"qew"}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -49,7 +50,8 @@ func BenchmarkIndexPrefixParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -50,7 +50,8 @@ func BenchmarkIndexRange(b *testing.B) {
m := Range{'0', '9', false}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -59,7 +60,8 @@ func BenchmarkIndexRangeParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -61,7 +61,8 @@ func BenchmarkRowIndex(b *testing.B) {
}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -77,7 +78,8 @@ func BenchmarkIndexRowParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -1,65 +1,83 @@
package match
import (
"sync"
"testing"
)
-func BenchmarkPerfPoolSequenced(b *testing.B) {
-pool := NewPoolSequenced(512, func() []int {
-return make([]int, 0, 16)
-})
+func benchPool(i int, b *testing.B) {
+pool := sync.Pool{New: func() interface{} {
+return make([]int, 0, i)
+}}
b.SetParallelism(32)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-s := pool.Get()
+s := pool.Get().([]int)[:0]
pool.Put(s)
}
})
}
-func BenchmarkPerfPoolSynced(b *testing.B) {
-pool := NewPoolSynced(32)
-b.SetParallelism(32)
+func benchMake(i int, b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-s := pool.Get()
-pool.Put(s)
+_ = make([]int, 0, i)
}
})
}
-func BenchmarkPerfPoolNative(b *testing.B) {
-pool := NewPoolNative(func() []int {
-return make([]int, 0, 16)
-})
-b.SetParallelism(32)
-b.RunParallel(func(pb *testing.PB) {
-for pb.Next() {
-s := pool.Get()
-pool.Put(s)
+func BenchmarkSegmentsPool_1(b *testing.B) {
+benchPool(1, b)
}
-})
+func BenchmarkSegmentsPool_2(b *testing.B) {
+benchPool(2, b)
}
+func BenchmarkSegmentsPool_4(b *testing.B) {
+benchPool(4, b)
+}
+func BenchmarkSegmentsPool_8(b *testing.B) {
+benchPool(8, b)
+}
+func BenchmarkSegmentsPool_16(b *testing.B) {
+benchPool(16, b)
+}
+func BenchmarkSegmentsPool_32(b *testing.B) {
+benchPool(32, b)
+}
+func BenchmarkSegmentsPool_64(b *testing.B) {
+benchPool(64, b)
+}
+func BenchmarkSegmentsPool_128(b *testing.B) {
+benchPool(128, b)
+}
+func BenchmarkSegmentsPool_256(b *testing.B) {
+benchPool(256, b)
+}
-func BenchmarkPerfPoolStatic(b *testing.B) {
-pool := NewPoolStatic(32, func() []int {
-return make([]int, 0, 16)
-})
-b.SetParallelism(32)
-b.RunParallel(func(pb *testing.PB) {
-for pb.Next() {
-i, v := pool.Get()
-pool.Put(i, v)
+func BenchmarkSegmentsMake_1(b *testing.B) {
+benchMake(1, b)
}
-})
+func BenchmarkSegmentsMake_2(b *testing.B) {
+benchMake(2, b)
}
-func BenchmarkPerfMake(b *testing.B) {
-for i := 0; i < b.N; i++ {
-_ = make([]int, 0, 32)
+func BenchmarkSegmentsMake_4(b *testing.B) {
+benchMake(4, b)
}
+func BenchmarkSegmentsMake_8(b *testing.B) {
+benchMake(8, b)
}
+func BenchmarkSegmentsMake_16(b *testing.B) {
+benchMake(16, b)
+}
+func BenchmarkSegmentsMake_32(b *testing.B) {
+benchMake(32, b)
+}
+func BenchmarkSegmentsMake_64(b *testing.B) {
+benchMake(64, b)
+}
+func BenchmarkSegmentsMake_128(b *testing.B) {
+benchMake(128, b)
+}
+func BenchmarkSegmentsMake_256(b *testing.B) {
+benchMake(256, b)
+}
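
The old ad-hoc pool benchmarks (sequenced, synced, native, static) give way to a sweep that pits sync.Pool reuse against a plain make across a range of capacities; the crossover it exposes is what motivates the cacheFrom cutoff in segments.go, where small slices are allocated directly and only larger ones go through a pool. A standalone sketch of the same comparison for a single capacity, using testing.Benchmark so it can run outside the test suite:

package main

import (
    "fmt"
    "sync"
    "testing"
)

func main() {
    const capacity = 16 // try 4, 16, 64, 256 to see the crossover move

    pool := sync.Pool{New: func() interface{} { return make([]int, 0, capacity) }}
    pooled := testing.Benchmark(func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            s := pool.Get().([]int)[:0]
            pool.Put(s)
        }
    })
    made := testing.Benchmark(func(b *testing.B) {
        for i := 0; i < b.N; i++ {
            _ = make([]int, 0, capacity)
        }
    })

    fmt.Println("pool:", pooled)
    fmt.Println("make:", made)
}

Inside the package itself, something like go test -run=NONE -bench='Segments(Pool|Make)' -benchmem produces the full sweep together with allocation counts.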

View File

@ -2,10 +2,9 @@ package match
import (
"sync"
"sync/atomic"
)
-var segmentsPools [1024]*PoolNative
+var segmentsPools [1024]*sync.Pool
func toPowerOfTwo(v int) int {
v--
@ -20,174 +19,52 @@ func toPowerOfTwo(v int) int {
}
const (
-minSegment = 4
-minSegmentMinusOne = 3
-maxSegment = 1024
-maxSegmentMinusOne = 1023
+cacheFrom = 16
+cacheToAndHigher = 1024
+cacheFromIndex = 15
+cacheToAndHigherIndex = 1023
)
func init() {
-for i := maxSegment; i >= minSegment; i >>= 1 {
+for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
func(i int) {
-segmentsPools[i-1] = NewPoolNative(func() []int {
+segmentsPools[i-1] = &sync.Pool{New: func() interface{} {
return make([]int, 0, i)
-})
+}}
}(i)
}
}
-func getIdx(c int) int {
+func getTableIndex(c int) int {
p := toPowerOfTwo(c)
switch {
-case p >= maxSegment:
-return maxSegmentMinusOne
-case p <= minSegment:
-return minSegmentMinusOne
+case p >= cacheToAndHigher:
+return cacheToAndHigherIndex
+case p <= cacheFrom:
+return cacheFromIndex
default:
return p - 1
}
}
func acquireSegments(c int) []int {
-return segmentsPools[getIdx(c)].Get()
+// make []int with less capacity than cacheFrom
+// is faster than acquiring it from pool
+if c < cacheFrom {
+return make([]int, 0, c)
+}
+return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
}
func releaseSegments(s []int) {
-segmentsPools[getIdx(cap(s))].Put(s)
-}
+c := cap(s)
-type newSegmentsFunc func() []int
-// Pool holds Clients.
-type PoolSequenced struct {
-new newSegmentsFunc
-pool chan []int
-}
-// NewPool creates a new pool of Clients.
-func NewPoolSequenced(size int, f newSegmentsFunc) *PoolSequenced {
-return &PoolSequenced{
-new: f,
-pool: make(chan []int, size),
-}
-}
-// Borrow a Client from the pool.
-func (p *PoolSequenced) Get() []int {
-var s []int
-select {
-case s = <-p.pool:
-default:
-s = p.new()
-}
-return s[:0]
-}
-// Return returns a Client to the pool.
-func (p *PoolSequenced) Put(s []int) {
-select {
-case p.pool <- s:
-default:
-// let it go, let it go...
-}
-}
-type PoolSynced struct {
-size int
-mu sync.Mutex
-list [][]int
-}
-func NewPoolSynced(size int) *PoolSynced {
-return &PoolSynced{
-size: size,
-}
-}
-func (p *PoolSynced) Get() []int {
-var s []int
-p.mu.Lock()
-ll := len(p.list)
-if ll > 0 {
-s, p.list = p.list[ll-1], p.list[:ll-1]
-}
-p.mu.Unlock()
-if s == nil {
-return make([]int, 0, p.size)
-}
-return s[:0]
-}
-func (p *PoolSynced) Put(s []int) {
-p.mu.Lock()
-defer p.mu.Unlock()
-p.list = append(p.list, s)
-}
-type PoolNative struct {
-pool *sync.Pool
-}
-func NewPoolNative(f newSegmentsFunc) *PoolNative {
-return &PoolNative{
-pool: &sync.Pool{New: func() interface{} {
-return f()
-}},
-}
-}
-func (p *PoolNative) Get() []int {
-return p.pool.Get().([]int)[:0]
-}
-func (p *PoolNative) Put(s []int) {
-p.pool.Put(s)
-}
-type segments struct {
-data []int
-locked int32
-}
-type PoolStatic struct {
-f newSegmentsFunc
-pool []*segments
-}
-func NewPoolStatic(size int, f newSegmentsFunc) *PoolStatic {
-p := &PoolStatic{
-f: f,
-pool: make([]*segments, 0, size),
-}
-for i := 0; i < size; i++ {
-p.pool = append(p.pool, &segments{
-data: f(),
-})
-}
-return p
-}
-func (p *PoolStatic) Get() (int, []int) {
-for i, s := range p.pool {
-if atomic.CompareAndSwapInt32(&s.locked, 0, 1) {
-return i, s.data
-}
-}
-return -1, p.f()
-}
-func (p *PoolStatic) Put(i int, s []int) {
-if i < 0 {
+// make []int with less capacity than cacheFrom
+// is faster than acquiring it from pool
+if c < cacheFrom {
return
}
-p.pool[i].data = s
-atomic.CompareAndSwapInt32(&(p.pool[i].locked), 1, 0)
+segmentsPools[getTableIndex(cap(s))].Put(s)
}
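
Taken together, the new segments.go keeps a fixed table of sync.Pool instances bucketed by power-of-two capacity and indexed at capacity minus one: requests below cacheFrom bypass the pools because a plain make is cheaper at that size, and anything that rounds up to cacheToAndHigher or beyond shares the largest bucket. A self-contained sketch of that scheme; the body of toPowerOfTwo is assumed (the diff only shows its first line), and the index constants are inlined rather than spelled as cacheFromIndex and cacheToAndHigherIndex:

package main

import (
    "fmt"
    "sync"
)

const (
    cacheFrom        = 16   // below this, make() beats a pool round trip
    cacheToAndHigher = 1024 // capacities at or above this share the largest bucket
)

// segmentsPools[n-1] serves slices of capacity n, for power-of-two n
// between cacheFrom and cacheToAndHigher.
var segmentsPools [cacheToAndHigher]*sync.Pool

func init() {
    for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
        i := i // capture a fresh i for this bucket's New func
        segmentsPools[i-1] = &sync.Pool{New: func() interface{} {
            return make([]int, 0, i)
        }}
    }
}

// toPowerOfTwo rounds v up to the next power of two
// (assumed implementation; the diff only shows the leading v--).
func toPowerOfTwo(v int) int {
    v--
    v |= v >> 1
    v |= v >> 2
    v |= v >> 4
    v |= v >> 8
    v |= v >> 16
    v++
    return v
}

func getTableIndex(c int) int {
    p := toPowerOfTwo(c)
    switch {
    case p >= cacheToAndHigher:
        return cacheToAndHigher - 1
    case p <= cacheFrom:
        return cacheFrom - 1
    default:
        return p - 1
    }
}

func acquireSegments(c int) []int {
    if c < cacheFrom {
        return make([]int, 0, c) // small: allocating is cheaper than pooling
    }
    return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
}

func releaseSegments(s []int) {
    if cap(s) < cacheFrom {
        return // small slices were never pooled; let the GC take them
    }
    segmentsPools[getTableIndex(cap(s))].Put(s)
}

func main() {
    s := acquireSegments(100)   // rounds up into the 128-capacity bucket
    fmt.Println(len(s), cap(s)) // 0 128
    releaseSegments(s)
}

The matcher hunks above are the other half of the contract: every acquireSegments call ends in either a releaseSegments or a return that hands the slice, and the duty to release it, to the caller.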

View File

@ -40,7 +40,8 @@ func BenchmarkIndexSingle(b *testing.B) {
m := Single{bench_separators}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -49,7 +50,8 @@ func BenchmarkIndexSingleParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -40,7 +40,8 @@ func BenchmarkIndexSuffix(b *testing.B) {
m := Suffix{"qwe"}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -49,7 +50,8 @@ func BenchmarkIndexSuffixParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -37,7 +37,8 @@ func BenchmarkIndexSuper(b *testing.B) {
m := Super{}
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -46,7 +47,8 @@ func BenchmarkIndexSuperParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}

View File

@ -40,7 +40,8 @@ func BenchmarkIndexText(b *testing.B) {
m := NewText("foo")
for i := 0; i < b.N; i++ {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
}
@ -49,7 +50,8 @@ func BenchmarkIndexTextParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
-m.Index(bench_pattern)
+_, s := m.Index(bench_pattern)
+releaseSegments(s)
}
})
}