opt: make ReleaseTimeout() more efficient in waiting workers to exit (#329)

Andy Pan 2024-06-18 01:06:48 +08:00 committed by GitHub
parent 3ffd3daa37
commit 15e896153d
4 changed files with 126 additions and 91 deletions
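For context on the public API touched by this change, here is a minimal usage sketch (not part of the commit; the pool size and sleep durations are arbitrary): after this patch, ReleaseTimeout blocks until the last worker has exited instead of polling on a fixed interval, and still returns ErrTimeout if the deadline passes first.

    package main

    import (
        "fmt"
        "time"

        "github.com/panjf2000/ants/v2"
    )

    func main() {
        p, _ := ants.NewPool(8) // arbitrary size for the example
        for i := 0; i < 8; i++ {
            _ = p.Submit(func() { time.Sleep(100 * time.Millisecond) })
        }
        // Waits for all workers (and the purge/ticktock goroutines) to stop,
        // or returns ants.ErrTimeout once the deadline is exceeded.
        if err := p.ReleaseTimeout(time.Second); err != nil {
            fmt.Println("release timed out:", err)
        }
    }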

pool.go

@@ -31,9 +31,7 @@ import (
     syncx "github.com/panjf2000/ants/v2/internal/sync"
 )
 
-// Pool accepts the tasks and process them concurrently,
-// it limits the total of goroutines to a given number by recycling goroutines.
-type Pool struct {
+type poolCommon struct {
     // capacity of the pool, a negative value means that the capacity of pool is limitless, an infinite pool is used to
     // avoid potential issue of endless blocking caused by nested usage of a pool: submitting a task to pool
     // which submits a new task to the same pool.
@@ -54,6 +52,11 @@ type Pool struct {
     // cond for waiting to get an idle worker.
     cond *sync.Cond
 
+    // done is used to indicate that all workers are done.
+    allDone chan struct{}
+    // once is used to make sure the pool is closed just once.
+    once *sync.Once
+
     // workerCache speeds up the obtainment of a usable worker in function:retrieveWorker.
     workerCache sync.Pool
 
@@ -61,9 +64,11 @@ type Pool struct {
     waiting int32
 
     purgeDone int32
+    purgeCtx  context.Context
     stopPurge context.CancelFunc
 
     ticktockDone int32
+    ticktockCtx  context.Context
     stopTicktock context.CancelFunc
 
     now atomic.Value
@@ -71,8 +76,14 @@ type Pool struct {
     options *Options
 }
 
+// Pool accepts the tasks and process them concurrently,
+// it limits the total of goroutines to a given number by recycling goroutines.
+type Pool struct {
+    poolCommon
+}
+
 // purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger.
-func (p *Pool) purgeStaleWorkers(ctx context.Context) {
+func (p *Pool) purgeStaleWorkers() {
     ticker := time.NewTicker(p.options.ExpiryDuration)
 
     defer func() {
@@ -82,7 +93,7 @@ func (p *Pool) purgeStaleWorkers(ctx context.Context) {
     for {
         select {
-        case <-ctx.Done():
+        case <-p.purgeCtx.Done():
             return
         case <-ticker.C:
         }
@@ -116,7 +127,7 @@ func (p *Pool) purgeStaleWorkers(ctx context.Context) {
 }
 
 // ticktock is a goroutine that updates the current time in the pool regularly.
-func (p *Pool) ticktock(ctx context.Context) {
+func (p *Pool) ticktock() {
     ticker := time.NewTicker(nowTimeUpdateInterval)
     defer func() {
         ticker.Stop()
@@ -125,7 +136,7 @@ func (p *Pool) ticktock(ctx context.Context) {
     for {
         select {
-        case <-ctx.Done():
+        case <-p.ticktockCtx.Done():
             return
         case <-ticker.C:
         }
@@ -144,16 +155,14 @@ func (p *Pool) goPurge() {
     }
 
     // Start a goroutine to clean up expired workers periodically.
-    var ctx context.Context
-    ctx, p.stopPurge = context.WithCancel(context.Background())
-    go p.purgeStaleWorkers(ctx)
+    p.purgeCtx, p.stopPurge = context.WithCancel(context.Background())
+    go p.purgeStaleWorkers()
 }
 
 func (p *Pool) goTicktock() {
     p.now.Store(time.Now())
-    var ctx context.Context
-    ctx, p.stopTicktock = context.WithCancel(context.Background())
-    go p.ticktock(ctx)
+    p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background())
+    go p.ticktock()
 }
 
 func (p *Pool) nowTime() time.Time {
@@ -180,11 +189,13 @@ func NewPool(size int, options ...Option) (*Pool, error) {
         opts.Logger = defaultLogger
     }
 
-    p := &Pool{
+    p := &Pool{poolCommon: poolCommon{
         capacity: int32(size),
+        allDone:  make(chan struct{}),
         lock:     syncx.NewSpinLock(),
+        once:     &sync.Once{},
         options:  opts,
-    }
+    }}
     p.workerCache.New = func() interface{} {
         return &goWorker{
             pool: p,
@@ -281,8 +292,10 @@ func (p *Pool) Release() {
         p.stopPurge()
         p.stopPurge = nil
     }
-    p.stopTicktock()
-    p.stopTicktock = nil
+    if p.stopTicktock != nil {
+        p.stopTicktock()
+        p.stopTicktock = nil
+    }
 
     p.lock.Lock()
     p.workers.reset()
@@ -297,19 +310,38 @@ func (p *Pool) ReleaseTimeout(timeout time.Duration) error {
     if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil {
         return ErrPoolClosed
     }
     p.Release()
 
-    interval := timeout / releaseTimeoutCount
-    endTime := time.Now().Add(timeout)
-    for time.Now().Before(endTime) {
-        if p.Running() == 0 &&
-            (p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
-            atomic.LoadInt32(&p.ticktockDone) == 1 {
-            return nil
-        }
-        time.Sleep(interval)
+    var purgeCh <-chan struct{}
+    if !p.options.DisablePurge {
+        purgeCh = p.purgeCtx.Done()
+    } else {
+        purgeCh = p.allDone
+    }
+
+    if p.Running() == 0 {
+        p.once.Do(func() {
+            close(p.allDone)
+        })
+    }
+
+    timer := time.NewTimer(timeout)
+    defer timer.Stop()
+
+    for {
+        select {
+        case <-timer.C:
+            return ErrTimeout
+        case <-p.allDone:
+            <-purgeCh
+            <-p.ticktockCtx.Done()
+            if p.Running() == 0 &&
+                (p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
+                atomic.LoadInt32(&p.ticktockDone) == 1 {
+                return nil
+            }
+        }
     }
-    return ErrTimeout
 }
 
 // Reboot reboots a closed pool.
@@ -319,11 +351,13 @@ func (p *Pool) Reboot() {
         p.goPurge()
         atomic.StoreInt32(&p.ticktockDone, 0)
         p.goTicktock()
+        p.allDone = make(chan struct{})
+        p.once = &sync.Once{}
     }
 }
 
-func (p *Pool) addRunning(delta int) {
-    atomic.AddInt32(&p.running, int32(delta))
+func (p *Pool) addRunning(delta int) int {
+    return int(atomic.AddInt32(&p.running, int32(delta)))
 }
 
 func (p *Pool) addWaiting(delta int) {
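The pool.go changes above boil down to one pattern; the sketch below restates it with made-up names (waiter, workerExit, waitAll), so it illustrates the idea rather than ants' actual types: the worker that drops the running count to zero closes a channel exactly once (guarded by sync.Once), and the releaser selects on that channel against a timer instead of sleeping and re-checking.

    package main

    import (
        "errors"
        "sync"
        "sync/atomic"
        "time"
    )

    // waiter is a hypothetical stand-in for the pool's running/allDone/once fields.
    type waiter struct {
        running int32
        allDone chan struct{}
        once    sync.Once
    }

    // workerExit mirrors the deferred block in goWorker.run: the goroutine that
    // brings the running count to zero closes allDone exactly once.
    func (w *waiter) workerExit() {
        if atomic.AddInt32(&w.running, -1) == 0 {
            w.once.Do(func() { close(w.allDone) })
        }
    }

    // waitAll mirrors the reworked ReleaseTimeout loop: block on allDone or the
    // timer, with no periodic polling.
    func (w *waiter) waitAll(timeout time.Duration) error {
        timer := time.NewTimer(timeout)
        defer timer.Stop()
        select {
        case <-timer.C:
            return errors.New("timed out")
        case <-w.allDone:
            return nil
        }
    }

    func main() {
        w := &waiter{running: 3, allDone: make(chan struct{})}
        for i := 0; i < 3; i++ {
            go func() {
                time.Sleep(50 * time.Millisecond) // simulated work
                w.workerExit()
            }()
        }
        if err := w.waitAll(time.Second); err != nil {
            panic(err)
        }
    }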

pool_func.go

@@ -34,46 +34,14 @@ import (
 // PoolWithFunc accepts the tasks and process them concurrently,
 // it limits the total of goroutines to a given number by recycling goroutines.
 type PoolWithFunc struct {
-    // capacity of the pool.
-    capacity int32
-
-    // running is the number of the currently running goroutines.
-    running int32
-
-    // lock for protecting the worker queue.
-    lock sync.Locker
-
-    // workers is a slice that store the available workers.
-    workers workerQueue
-
-    // state is used to notice the pool to closed itself.
-    state int32
-
-    // cond for waiting to get an idle worker.
-    cond *sync.Cond
+    poolCommon
 
     // poolFunc is the function for processing tasks.
     poolFunc func(interface{})
-
-    // workerCache speeds up the obtainment of a usable worker in function:retrieveWorker.
-    workerCache sync.Pool
-
-    // waiting is the number of the goroutines already been blocked on pool.Invoke(), protected by pool.lock
-    waiting int32
-
-    purgeDone int32
-    stopPurge context.CancelFunc
-
-    ticktockDone int32
-    stopTicktock context.CancelFunc
-
-    now atomic.Value
-
-    options *Options
 }
 
 // purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger.
-func (p *PoolWithFunc) purgeStaleWorkers(ctx context.Context) {
+func (p *PoolWithFunc) purgeStaleWorkers() {
     ticker := time.NewTicker(p.options.ExpiryDuration)
 
     defer func() {
         ticker.Stop()
@@ -82,7 +50,7 @@ func (p *PoolWithFunc) purgeStaleWorkers(ctx context.Context) {
     for {
         select {
-        case <-ctx.Done():
+        case <-p.purgeCtx.Done():
             return
         case <-ticker.C:
         }
@@ -116,7 +84,7 @@ func (p *PoolWithFunc) purgeStaleWorkers(ctx context.Context) {
 }
 
 // ticktock is a goroutine that updates the current time in the pool regularly.
-func (p *PoolWithFunc) ticktock(ctx context.Context) {
+func (p *PoolWithFunc) ticktock() {
     ticker := time.NewTicker(nowTimeUpdateInterval)
     defer func() {
         ticker.Stop()
@@ -125,7 +93,7 @@ func (p *PoolWithFunc) ticktock(ctx context.Context) {
     for {
         select {
-        case <-ctx.Done():
+        case <-p.ticktockCtx.Done():
             return
         case <-ticker.C:
         }
@@ -144,16 +112,14 @@ func (p *PoolWithFunc) goPurge() {
     }
 
     // Start a goroutine to clean up expired workers periodically.
-    var ctx context.Context
-    ctx, p.stopPurge = context.WithCancel(context.Background())
-    go p.purgeStaleWorkers(ctx)
+    p.purgeCtx, p.stopPurge = context.WithCancel(context.Background())
+    go p.purgeStaleWorkers()
 }
 
 func (p *PoolWithFunc) goTicktock() {
     p.now.Store(time.Now())
-    var ctx context.Context
-    ctx, p.stopTicktock = context.WithCancel(context.Background())
-    go p.ticktock(ctx)
+    p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background())
+    go p.ticktock()
 }
 
 func (p *PoolWithFunc) nowTime() time.Time {
@@ -185,10 +151,14 @@ func NewPoolWithFunc(size int, pf func(interface{}), options ...Option) (*PoolWithFunc, error) {
     }
 
     p := &PoolWithFunc{
-        capacity: int32(size),
+        poolCommon: poolCommon{
+            capacity: int32(size),
+            allDone:  make(chan struct{}),
+            lock:     syncx.NewSpinLock(),
+            once:     &sync.Once{},
+            options:  opts,
+        },
         poolFunc: pf,
-        lock:     syncx.NewSpinLock(),
-        options:  opts,
     }
     p.workerCache.New = func() interface{} {
         return &goWorkerWithFunc{
@@ -286,8 +256,10 @@ func (p *PoolWithFunc) Release() {
         p.stopPurge()
         p.stopPurge = nil
     }
-    p.stopTicktock()
-    p.stopTicktock = nil
+    if p.stopTicktock != nil {
+        p.stopTicktock()
+        p.stopTicktock = nil
+    }
 
     p.lock.Lock()
     p.workers.reset()
@@ -302,19 +274,38 @@ func (p *PoolWithFunc) ReleaseTimeout(timeout time.Duration) error {
     if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil {
         return ErrPoolClosed
     }
     p.Release()
 
-    interval := timeout / releaseTimeoutCount
-    endTime := time.Now().Add(timeout)
-    for time.Now().Before(endTime) {
-        if p.Running() == 0 &&
-            (p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
-            atomic.LoadInt32(&p.ticktockDone) == 1 {
-            return nil
-        }
-        time.Sleep(interval)
+    var purgeCh <-chan struct{}
+    if !p.options.DisablePurge {
+        purgeCh = p.purgeCtx.Done()
+    } else {
+        purgeCh = p.allDone
+    }
+
+    if p.Running() == 0 {
+        p.once.Do(func() {
+            close(p.allDone)
+        })
+    }
+
+    timer := time.NewTimer(timeout)
+    defer timer.Stop()
+
+    for {
+        select {
+        case <-timer.C:
+            return ErrTimeout
+        case <-p.allDone:
+            <-purgeCh
+            <-p.ticktockCtx.Done()
+            if p.Running() == 0 &&
+                (p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
+                atomic.LoadInt32(&p.ticktockDone) == 1 {
+                return nil
+            }
+        }
     }
-    return ErrTimeout
 }
 
 // Reboot reboots a closed pool.
@@ -324,11 +315,13 @@ func (p *PoolWithFunc) Reboot() {
         p.goPurge()
         atomic.StoreInt32(&p.ticktockDone, 0)
         p.goTicktock()
+        p.allDone = make(chan struct{})
+        p.once = &sync.Once{}
     }
 }
 
-func (p *PoolWithFunc) addRunning(delta int) {
-    atomic.AddInt32(&p.running, int32(delta))
+func (p *PoolWithFunc) addRunning(delta int) int {
+    return int(atomic.AddInt32(&p.running, int32(delta)))
 }
 
 func (p *PoolWithFunc) addWaiting(delta int) {

worker.go

@@ -47,7 +47,11 @@ func (w *goWorker) run() {
     w.pool.addRunning(1)
     go func() {
         defer func() {
-            w.pool.addRunning(-1)
+            if w.pool.addRunning(-1) == 0 && w.pool.IsClosed() {
+                w.pool.once.Do(func() {
+                    close(w.pool.allDone)
+                })
+            }
             w.pool.workerCache.Put(w)
             if p := recover(); p != nil {
                 if ph := w.pool.options.PanicHandler; ph != nil {

worker_func.go

@@ -47,7 +47,11 @@ func (w *goWorkerWithFunc) run() {
     w.pool.addRunning(1)
     go func() {
         defer func() {
-            w.pool.addRunning(-1)
+            if w.pool.addRunning(-1) == 0 && w.pool.IsClosed() {
+                w.pool.once.Do(func() {
+                    close(w.pool.allDone)
+                })
+            }
             w.pool.workerCache.Put(w)
             if p := recover(); p != nil {
                 if ph := w.pool.options.PanicHandler; ph != nil {