opt: make ReleaseTimeout() more efficient when waiting for workers to exit (#329)

Andy Pan 2024-06-18 01:06:48 +08:00 committed by GitHub
parent 3ffd3daa37
commit 15e896153d
4 changed files with 126 additions and 91 deletions
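For context on what this commit optimizes: ReleaseTimeout shuts the pool down and blocks until every worker has exited or the timeout expires. A minimal usage sketch (pool size, task body and timeout are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Pool size and task body are illustrative.
	p, err := ants.NewPool(10)
	if err != nil {
		panic(err)
	}

	for i := 0; i < 100; i++ {
		_ = p.Submit(func() {
			time.Sleep(10 * time.Millisecond)
		})
	}

	// Before this commit, ReleaseTimeout slept and re-checked in a loop;
	// with this change it blocks on channels and returns as soon as the
	// last worker has exited (or returns ErrTimeout after 3s here).
	if err := p.ReleaseTimeout(3 * time.Second); err != nil {
		fmt.Println("release:", err)
	}
}

The diff below changes only how that blocking is implemented, not the API.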

pool.go

@@ -31,9 +31,7 @@ import (
syncx "github.com/panjf2000/ants/v2/internal/sync"
)
// Pool accepts the tasks and process them concurrently,
// it limits the total of goroutines to a given number by recycling goroutines.
type Pool struct {
type poolCommon struct {
// capacity of the pool, a negative value means that the capacity of pool is limitless, an infinite pool is used to
// avoid potential issue of endless blocking caused by nested usage of a pool: submitting a task to pool
// which submits a new task to the same pool.
@@ -54,6 +52,11 @@ type Pool struct {
// cond for waiting to get an idle worker.
cond *sync.Cond
// done is used to indicate that all workers are done.
allDone chan struct{}
// once is used to make sure the pool is closed just once.
once *sync.Once
// workerCache speeds up the obtainment of a usable worker in function:retrieveWorker.
workerCache sync.Pool
@@ -61,9 +64,11 @@ type Pool struct {
waiting int32
purgeDone int32
purgeCtx context.Context
stopPurge context.CancelFunc
ticktockDone int32
ticktockCtx context.Context
stopTicktock context.CancelFunc
now atomic.Value
@@ -71,8 +76,14 @@ type Pool struct {
options *Options
}
// Pool accepts the tasks and process them concurrently,
// it limits the total of goroutines to a given number by recycling goroutines.
type Pool struct {
poolCommon
}
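The hunks above also introduce the structural refactor the rest of the diff relies on: the fields shared by Pool and PoolWithFunc (capacity, lock, cond, the new allDone/once pair, the purge and ticktock contexts, and so on) move into an embedded poolCommon, and both pool types pick them up by field promotion. A rough sketch of the resulting shape, with the field list abbreviated (see the diff for the real set):

package antsketch

import "sync"

// Rough shape of the refactor; field list abbreviated.
type poolCommon struct {
	capacity int32
	running  int32
	allDone  chan struct{} // closed exactly once when the last worker exits
	once     *sync.Once    // guards the close of allDone
}

type Pool struct {
	poolCommon // fields are promoted: p.capacity, p.allDone, ...
}

type PoolWithFunc struct {
	poolCommon
	poolFunc func(interface{}) // the per-task function
}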
// purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger.
func (p *Pool) purgeStaleWorkers(ctx context.Context) {
func (p *Pool) purgeStaleWorkers() {
ticker := time.NewTicker(p.options.ExpiryDuration)
defer func() {
@@ -82,7 +93,7 @@ func (p *Pool) purgeStaleWorkers(ctx context.Context) {
for {
select {
case <-ctx.Done():
case <-p.purgeCtx.Done():
return
case <-ticker.C:
}
@@ -116,7 +127,7 @@ func (p *Pool) purgeStaleWorkers(ctx context.Context) {
}
// ticktock is a goroutine that updates the current time in the pool regularly.
func (p *Pool) ticktock(ctx context.Context) {
func (p *Pool) ticktock() {
ticker := time.NewTicker(nowTimeUpdateInterval)
defer func() {
ticker.Stop()
@@ -125,7 +136,7 @@ func (p *Pool) ticktock(ctx context.Context) {
for {
select {
case <-ctx.Done():
case <-p.ticktockCtx.Done():
return
case <-ticker.C:
}
@@ -144,16 +155,14 @@ func (p *Pool) goPurge() {
}
// Start a goroutine to clean up expired workers periodically.
var ctx context.Context
ctx, p.stopPurge = context.WithCancel(context.Background())
go p.purgeStaleWorkers(ctx)
p.purgeCtx, p.stopPurge = context.WithCancel(context.Background())
go p.purgeStaleWorkers()
}
func (p *Pool) goTicktock() {
p.now.Store(time.Now())
var ctx context.Context
ctx, p.stopTicktock = context.WithCancel(context.Background())
go p.ticktock(ctx)
p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background())
go p.ticktock()
}
func (p *Pool) nowTime() time.Time {
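Note also that goPurge and goTicktock used to build their cancel contexts in local variables and hand them to the goroutines; they now keep them on the pool as purgeCtx and ticktockCtx, so that ReleaseTimeout can later block on the same Done() channels. A minimal sketch of that pattern, with illustrative names:

package main

import (
	"context"
	"fmt"
	"time"
)

// ticker keeps its context on the struct so that both the background
// goroutine and an outside waiter can use ctx.Done(); names are illustrative.
type ticker struct {
	ctx  context.Context
	stop context.CancelFunc
}

func (t *ticker) start() {
	t.ctx, t.stop = context.WithCancel(context.Background())
	go func() {
		tk := time.NewTicker(10 * time.Millisecond)
		defer tk.Stop()
		for {
			select {
			case <-t.ctx.Done():
				return
			case <-tk.C:
				// periodic work goes here
			}
		}
	}()
}

func main() {
	t := &ticker{}
	t.start()
	t.stop()       // cancel the goroutine...
	<-t.ctx.Done() // ...and observe the cancellation from outside
	fmt.Println("ticker stopped")
}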
@@ -180,11 +189,13 @@ func NewPool(size int, options ...Option) (*Pool, error) {
opts.Logger = defaultLogger
}
p := &Pool{
p := &Pool{poolCommon: poolCommon{
capacity: int32(size),
allDone: make(chan struct{}),
lock: syncx.NewSpinLock(),
once: &sync.Once{},
options: opts,
}
}}
p.workerCache.New = func() interface{} {
return &goWorker{
pool: p,
@@ -281,8 +292,10 @@ func (p *Pool) Release() {
p.stopPurge()
p.stopPurge = nil
}
p.stopTicktock()
p.stopTicktock = nil
if p.stopTicktock != nil {
p.stopTicktock()
p.stopTicktock = nil
}
p.lock.Lock()
p.workers.reset()
@@ -297,19 +310,38 @@ func (p *Pool) ReleaseTimeout(timeout time.Duration) error {
if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil {
return ErrPoolClosed
}
p.Release()
interval := timeout / releaseTimeoutCount
endTime := time.Now().Add(timeout)
for time.Now().Before(endTime) {
if p.Running() == 0 &&
(p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
atomic.LoadInt32(&p.ticktockDone) == 1 {
return nil
}
time.Sleep(interval)
var purgeCh <-chan struct{}
if !p.options.DisablePurge {
purgeCh = p.purgeCtx.Done()
} else {
purgeCh = p.allDone
}
if p.Running() == 0 {
p.once.Do(func() {
close(p.allDone)
})
}
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-timer.C:
return ErrTimeout
case <-p.allDone:
<-purgeCh
<-p.ticktockCtx.Done()
if p.Running() == 0 &&
(p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
atomic.LoadInt32(&p.ticktockDone) == 1 {
return nil
}
}
}
return ErrTimeout
}
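Taken together, the new ReleaseTimeout drops the old sleep-and-poll loop (one nap of timeout/releaseTimeoutCount per iteration) in favour of a single timer raced against channel closes: it wakes the instant allDone is closed, then confirms that the purge and ticktock goroutines have stopped before returning nil. A standalone sketch of that wait-with-timeout shape, assuming some other goroutine closes the done channel exactly once:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("timed out waiting for workers")

// waitAllDone blocks until done is closed or the timeout fires, instead of
// polling a condition in a sleep loop; done is assumed to be closed exactly
// once (e.g. guarded by a sync.Once) when the last worker exits.
func waitAllDone(done <-chan struct{}, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-timer.C:
		return errTimeout
	case <-done:
		return nil
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // stand-in for workers draining
		close(done)
	}()
	fmt.Println(waitAllDone(done, time.Second)) // prints <nil> after ~50ms
}

The practical difference is latency: the caller returns as soon as the last worker exits instead of at the next polling tick.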
// Reboot reboots a closed pool.
@@ -319,11 +351,13 @@ func (p *Pool) Reboot() {
p.goPurge()
atomic.StoreInt32(&p.ticktockDone, 0)
p.goTicktock()
p.allDone = make(chan struct{})
p.once = &sync.Once{}
}
}
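The two lines added to Reboot are easy to overlook but necessary: a closed channel cannot be reopened and a sync.Once that has fired never runs again, so a rebooted pool must get a fresh allDone and a fresh once, otherwise the next ReleaseTimeout would find the channel already closed. A small illustration of that property:

package main

import (
	"fmt"
	"sync"
)

func main() {
	done := make(chan struct{})
	once := &sync.Once{}

	once.Do(func() { close(done) }) // first shutdown: closes the channel
	once.Do(func() { close(done) }) // no-op: a fired Once never runs again

	select {
	case <-done:
		fmt.Println("done stays closed forever")
	default:
	}

	// After a reboot, both must be replaced for the signal to work again.
	done = make(chan struct{})
	once = &sync.Once{}
	select {
	case <-done:
		fmt.Println("unexpected")
	default:
		fmt.Println("fresh channel blocks again, as a rebooted pool needs")
	}
}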
func (p *Pool) addRunning(delta int) {
atomic.AddInt32(&p.running, int32(delta))
func (p *Pool) addRunning(delta int) int {
return int(atomic.AddInt32(&p.running, int32(delta)))
}
func (p *Pool) addWaiting(delta int) {

pool_func.go

@@ -34,46 +34,14 @@ import (
// PoolWithFunc accepts the tasks and process them concurrently,
// it limits the total of goroutines to a given number by recycling goroutines.
type PoolWithFunc struct {
// capacity of the pool.
capacity int32
// running is the number of the currently running goroutines.
running int32
// lock for protecting the worker queue.
lock sync.Locker
// workers is a slice that store the available workers.
workers workerQueue
// state is used to notice the pool to closed itself.
state int32
// cond for waiting to get an idle worker.
cond *sync.Cond
poolCommon
// poolFunc is the function for processing tasks.
poolFunc func(interface{})
// workerCache speeds up the obtainment of a usable worker in function:retrieveWorker.
workerCache sync.Pool
// waiting is the number of the goroutines already been blocked on pool.Invoke(), protected by pool.lock
waiting int32
purgeDone int32
stopPurge context.CancelFunc
ticktockDone int32
stopTicktock context.CancelFunc
now atomic.Value
options *Options
}
// purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger.
func (p *PoolWithFunc) purgeStaleWorkers(ctx context.Context) {
func (p *PoolWithFunc) purgeStaleWorkers() {
ticker := time.NewTicker(p.options.ExpiryDuration)
defer func() {
ticker.Stop()
@@ -82,7 +50,7 @@ func (p *PoolWithFunc) purgeStaleWorkers(ctx context.Context) {
for {
select {
case <-ctx.Done():
case <-p.purgeCtx.Done():
return
case <-ticker.C:
}
@@ -116,7 +84,7 @@ func (p *PoolWithFunc) purgeStaleWorkers(ctx context.Context) {
}
// ticktock is a goroutine that updates the current time in the pool regularly.
func (p *PoolWithFunc) ticktock(ctx context.Context) {
func (p *PoolWithFunc) ticktock() {
ticker := time.NewTicker(nowTimeUpdateInterval)
defer func() {
ticker.Stop()
@@ -125,7 +93,7 @@ func (p *PoolWithFunc) ticktock(ctx context.Context) {
for {
select {
case <-ctx.Done():
case <-p.ticktockCtx.Done():
return
case <-ticker.C:
}
@@ -144,16 +112,14 @@ func (p *PoolWithFunc) goPurge() {
}
// Start a goroutine to clean up expired workers periodically.
var ctx context.Context
ctx, p.stopPurge = context.WithCancel(context.Background())
go p.purgeStaleWorkers(ctx)
p.purgeCtx, p.stopPurge = context.WithCancel(context.Background())
go p.purgeStaleWorkers()
}
func (p *PoolWithFunc) goTicktock() {
p.now.Store(time.Now())
var ctx context.Context
ctx, p.stopTicktock = context.WithCancel(context.Background())
go p.ticktock(ctx)
p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background())
go p.ticktock()
}
func (p *PoolWithFunc) nowTime() time.Time {
@@ -185,10 +151,14 @@ func NewPoolWithFunc(size int, pf func(interface{}), options ...Option) (*PoolWithFunc, error) {
}
p := &PoolWithFunc{
capacity: int32(size),
poolCommon: poolCommon{
capacity: int32(size),
allDone: make(chan struct{}),
lock: syncx.NewSpinLock(),
once: &sync.Once{},
options: opts,
},
poolFunc: pf,
lock: syncx.NewSpinLock(),
options: opts,
}
p.workerCache.New = func() interface{} {
return &goWorkerWithFunc{
@@ -286,8 +256,10 @@ func (p *PoolWithFunc) Release() {
p.stopPurge()
p.stopPurge = nil
}
p.stopTicktock()
p.stopTicktock = nil
if p.stopTicktock != nil {
p.stopTicktock()
p.stopTicktock = nil
}
p.lock.Lock()
p.workers.reset()
@@ -302,19 +274,38 @@ func (p *PoolWithFunc) ReleaseTimeout(timeout time.Duration) error {
if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil {
return ErrPoolClosed
}
p.Release()
interval := timeout / releaseTimeoutCount
endTime := time.Now().Add(timeout)
for time.Now().Before(endTime) {
if p.Running() == 0 &&
(p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
atomic.LoadInt32(&p.ticktockDone) == 1 {
return nil
}
time.Sleep(interval)
var purgeCh <-chan struct{}
if !p.options.DisablePurge {
purgeCh = p.purgeCtx.Done()
} else {
purgeCh = p.allDone
}
if p.Running() == 0 {
p.once.Do(func() {
close(p.allDone)
})
}
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-timer.C:
return ErrTimeout
case <-p.allDone:
<-purgeCh
<-p.ticktockCtx.Done()
if p.Running() == 0 &&
(p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
atomic.LoadInt32(&p.ticktockDone) == 1 {
return nil
}
}
}
return ErrTimeout
}
// Reboot reboots a closed pool.
@@ -324,11 +315,13 @@ func (p *PoolWithFunc) Reboot() {
p.goPurge()
atomic.StoreInt32(&p.ticktockDone, 0)
p.goTicktock()
p.allDone = make(chan struct{})
p.once = &sync.Once{}
}
}
func (p *PoolWithFunc) addRunning(delta int) {
atomic.AddInt32(&p.running, int32(delta))
func (p *PoolWithFunc) addRunning(delta int) int {
return int(atomic.AddInt32(&p.running, int32(delta)))
}
func (p *PoolWithFunc) addWaiting(delta int) {

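pool_func.go gets the same treatment: PoolWithFunc now embeds poolCommon, and its Release, ReleaseTimeout and Reboot mirror the pool.go changes above. For reference, a PoolWithFunc is used like this (size, task body and timeout are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	var wg sync.WaitGroup
	p, err := ants.NewPoolWithFunc(8, func(arg interface{}) {
		defer wg.Done()
		fmt.Println("processed", arg)
	})
	if err != nil {
		panic(err)
	}

	for i := 0; i < 20; i++ {
		wg.Add(1)
		_ = p.Invoke(i)
	}
	wg.Wait()

	// Same fast path as Pool: returns as soon as all workers have exited.
	_ = p.ReleaseTimeout(time.Second)
}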
worker.go

@@ -47,7 +47,11 @@ func (w *goWorker) run() {
w.pool.addRunning(1)
go func() {
defer func() {
w.pool.addRunning(-1)
if w.pool.addRunning(-1) == 0 && w.pool.IsClosed() {
w.pool.once.Do(func() {
close(w.pool.allDone)
})
}
w.pool.workerCache.Put(w)
if p := recover(); p != nil {
if ph := w.pool.options.PanicHandler; ph != nil {

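worker.go holds the other half of the mechanism: addRunning now returns the updated counter, and the deferred cleanup in goWorker.run closes allDone (exactly once, via the pool's sync.Once) when it is the last worker to exit after the pool has been closed, which is what wakes ReleaseTimeout. A self-contained sketch of this "last one out signals completion" pattern, with illustrative names rather than the ants internals:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// group mimics the pattern used by the worker cleanup above; the names
// (group, runWorker) are illustrative, not the ants internals.
type group struct {
	running int32
	closed  int32
	allDone chan struct{}
	once    sync.Once
}

func (g *group) addRunning(delta int) int {
	return int(atomic.AddInt32(&g.running, int32(delta)))
}

func (g *group) isClosed() bool { return atomic.LoadInt32(&g.closed) == 1 }

func (g *group) runWorker(id int) {
	g.addRunning(1)
	go func() {
		defer func() {
			// The worker that brings the count to zero after shutdown
			// signals completion; sync.Once prevents a double close.
			if g.addRunning(-1) == 0 && g.isClosed() {
				g.once.Do(func() { close(g.allDone) })
			}
		}()
		time.Sleep(10 * time.Millisecond) // stand-in for real work
		fmt.Println("worker", id, "finished")
	}()
}

func main() {
	g := &group{allDone: make(chan struct{})}
	for i := 0; i < 4; i++ {
		g.runWorker(i)
	}

	atomic.StoreInt32(&g.closed, 1) // mark the group closed, as Release does
	if atomic.LoadInt32(&g.running) == 0 {
		// Nobody is left to signal us, so close the channel ourselves;
		// this mirrors the Running() == 0 check added to ReleaseTimeout.
		g.once.Do(func() { close(g.allDone) })
	}

	<-g.allDone // wakes as soon as the last worker has exited
	fmt.Println("all workers exited")
}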
worker_func.go

@@ -47,7 +47,11 @@ func (w *goWorkerWithFunc) run() {
w.pool.addRunning(1)
go func() {
defer func() {
w.pool.addRunning(-1)
if w.pool.addRunning(-1) == 0 && w.pool.IsClosed() {
w.pool.once.Do(func() {
close(w.pool.allDone)
})
}
w.pool.workerCache.Put(w)
if p := recover(); p != nil {
if ph := w.pool.options.PanicHandler; ph != nil {