// MIT License

// Copyright (c) 2018 Andy Pan

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

package ants

import (
	"sync"
	"sync/atomic"
	"time"

	"github.com/panjf2000/ants/v2/internal"
)

// Pool accepts tasks from clients; it limits the total number of goroutines
// to a given size by recycling them.
type Pool struct {
	// capacity of the pool. A negative value means the capacity is limitless;
	// an infinite pool is used to avoid the potential issue of endless blocking
	// caused by nested usage of a pool: submitting a task to the pool from
	// within a task already running in the same pool.
	capacity int32

	// running is the number of currently running goroutines.
	running int32

	// workers is a slice that stores the available workers.
	workers workerArray

	// state is used to notify the pool to close itself.
	state int32

	// lock for synchronizing concurrent operations.
	lock sync.Locker

	// cond for waiting to get an idle worker.
	cond *sync.Cond

	// workerCache speeds up obtaining a usable worker in retrieveWorker.
	workerCache sync.Pool

	// blockingNum is the number of goroutines already blocked on pool.Submit,
	// protected by pool.lock.
	blockingNum int

	options *Options
}

// purgePeriodically clears expired workers periodically; it runs in a
// dedicated goroutine, acting as a scavenger.
func (p *Pool) purgePeriodically() {
	heartbeat := time.NewTicker(p.options.ExpiryDuration)
	defer heartbeat.Stop()

	for range heartbeat.C {
		if p.IsClosed() {
			break
		}

		p.lock.Lock()
		expiredWorkers := p.workers.retrieveExpiry(p.options.ExpiryDuration)
		p.lock.Unlock()

		// Notify obsolete workers to stop.
		// This notification must happen outside p.lock, since w.task may block
		// and may take a long time if many workers are located on non-local CPUs.
		for i := range expiredWorkers {
			expiredWorkers[i].task <- nil
			expiredWorkers[i] = nil
		}

		// There might be a situation where all workers have been cleaned up
		// (no worker is running) while some invokers are still stuck in
		// p.cond.Wait(); in that case we ought to wake all of them.
		if p.Running() == 0 {
			p.cond.Broadcast()
		}
	}
}
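
// Usage sketch (illustrative only, not part of this file's API surface):
// a typical client constructs a pool, submits tasks, waits for them with its
// own sync.WaitGroup, and releases the pool when done. The pool size and the
// task body below are arbitrary placeholders.
//
//	p, err := NewPool(10)
//	if err != nil {
//		panic(err)
//	}
//	defer p.Release()
//
//	var wg sync.WaitGroup
//	for i := 0; i < 100; i++ {
//		wg.Add(1)
//		_ = p.Submit(func() {
//			defer wg.Done()
//			// ... do some work ...
//		})
//	}
//	wg.Wait()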

// NewPool generates an instance of an ants pool.
func NewPool(size int, options ...Option) (*Pool, error) {
	opts := loadOptions(options...)

	if size <= 0 {
		size = -1
	}

	if expiry := opts.ExpiryDuration; expiry < 0 {
		return nil, ErrInvalidPoolExpiry
	} else if expiry == 0 {
		opts.ExpiryDuration = DefaultCleanIntervalTime
	}

	if opts.Logger == nil {
		opts.Logger = defaultLogger
	}

	p := &Pool{
		capacity: int32(size),
		lock:     internal.NewSpinLock(),
		options:  opts,
	}
	p.workerCache.New = func() interface{} {
		return &goWorker{
			pool: p,
			task: make(chan func(), workerChanCap),
		}
	}
	if p.options.PreAlloc {
		if size == -1 {
			return nil, ErrInvalidPreAllocSize
		}
		p.workers = newWorkerArray(loopQueueType, size)
	} else {
		p.workers = newWorkerArray(stackType, 0)
	}

	p.cond = sync.NewCond(p.lock)

	// Start a goroutine to clean up expired workers periodically.
	go p.purgePeriodically()

	return p, nil
}

// ---------------------------------------------------------------------------

// Submit submits a task to this pool.
func (p *Pool) Submit(task func()) error {
	if p.IsClosed() {
		return ErrPoolClosed
	}
	var w *goWorker
	if w = p.retrieveWorker(); w == nil {
		return ErrPoolOverload
	}
	w.task <- task
	return nil
}

// Running returns the number of currently running goroutines.
func (p *Pool) Running() int {
	return int(atomic.LoadInt32(&p.running))
}

// Free returns the number of available goroutines to work.
func (p *Pool) Free() int {
	return p.Cap() - p.Running()
}

// Cap returns the capacity of this pool.
func (p *Pool) Cap() int {
	return int(atomic.LoadInt32(&p.capacity))
}

// Tune changes the capacity of this pool; it has no effect on an infinite or
// pre-allocated pool.
func (p *Pool) Tune(size int) {
	if capacity := p.Cap(); capacity == -1 || size <= 0 || size == capacity || p.options.PreAlloc {
		return
	}
	atomic.StoreInt32(&p.capacity, int32(size))
}

// IsClosed indicates whether the pool is closed.
func (p *Pool) IsClosed() bool {
	return atomic.LoadInt32(&p.state) == CLOSED
}

// Release closes this pool.
func (p *Pool) Release() {
	atomic.StoreInt32(&p.state, CLOSED)
	p.lock.Lock()
	p.workers.reset()
	p.lock.Unlock()
	// There might be some callers waiting in retrieveWorker(), so we need to
	// wake them up to prevent them from blocking forever.
	p.cond.Broadcast()
}

// Reboot reboots a released pool.
func (p *Pool) Reboot() {
	if atomic.CompareAndSwapInt32(&p.state, CLOSED, OPENED) {
		go p.purgePeriodically()
	}
}

// ---------------------------------------------------------------------------

// incRunning increases the number of currently running goroutines.
func (p *Pool) incRunning() {
	atomic.AddInt32(&p.running, 1)
}

// decRunning decreases the number of currently running goroutines.
func (p *Pool) decRunning() {
	atomic.AddInt32(&p.running, -1)
}

// retrieveWorker returns an available worker to run the tasks.
func (p *Pool) retrieveWorker() (w *goWorker) {
	spawnWorker := func() {
		w = p.workerCache.Get().(*goWorker)
		w.run()
	}

	p.lock.Lock()

	w = p.workers.detach()
	if w != nil {
		// First, try to fetch an idle worker from the queue.
		p.lock.Unlock()
	} else if capacity := p.Cap(); capacity == -1 || p.Running() < capacity {
		// The queue is empty but the pool is infinite or hasn't reached its
		// capacity yet, so spawn a new worker goroutine.
		p.lock.Unlock()
		spawnWorker()
	} else {
		// The pool is full; either fail fast or block until a worker is put back.
		if p.options.Nonblocking {
			p.lock.Unlock()
			return
		}
	Reentry:
		if p.options.MaxBlockingTasks != 0 && p.blockingNum >= p.options.MaxBlockingTasks {
			p.lock.Unlock()
			return
		}
		p.blockingNum++
		p.cond.Wait() // block and wait for an available worker
		p.blockingNum--
		if p.Running() == 0 {
			// Awakened by the scavenger or by Release(). Unlock before
			// returning so other waiters aren't blocked forever.
			p.lock.Unlock()
			if !p.IsClosed() {
				spawnWorker()
			}
			return
		}

		w = p.workers.detach()
		if w == nil {
			// Another waiter grabbed the worker first; go back to waiting.
			goto Reentry
		}

		p.lock.Unlock()
	}
	return
}
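
// Overload behavior sketch (illustrative only): with the Nonblocking option,
// or once MaxBlockingTasks is exceeded, retrieveWorker returns nil and Submit
// surfaces ErrPoolOverload to the caller. The capacity of 1 and the sleep
// below are arbitrary choices to force the overload path.
//
//	p, _ := NewPool(1, WithNonblocking(true))
//	defer p.Release()
//
//	_ = p.Submit(func() { time.Sleep(time.Second) }) // occupies the only worker
//	if err := p.Submit(func() {}); err == ErrPoolOverload {
//		// back off, drop the task, or retry later
//	}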

// revertWorker puts a worker back into the free pool, recycling the goroutines.
func (p *Pool) revertWorker(worker *goWorker) bool {
	if capacity := p.Cap(); (capacity > 0 && p.Running() > capacity) || p.IsClosed() {
		return false
	}
	worker.recycleTime = time.Now()
	p.lock.Lock()

	// To avoid memory leaks, add a double check inside the lock scope.
	// Issue: https://github.com/panjf2000/ants/issues/113
	if p.IsClosed() {
		p.lock.Unlock()
		return false
	}

	err := p.workers.insert(worker)
	if err != nil {
		p.lock.Unlock()
		return false
	}

	// Notify an invoker stuck in retrieveWorker() that there is an available
	// worker in the worker queue.
	p.cond.Signal()
	p.lock.Unlock()
	return true
}
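
// Lifecycle sketch (illustrative only): Tune resizes a finite,
// non-preallocated pool at runtime; Release closes the pool and wakes any
// blocked submitters; Reboot reopens it and restarts the scavenger goroutine.
// The sizes below are arbitrary.
//
//	p, _ := NewPool(4)
//	p.Tune(8) // grow capacity from 4 to 8
//
//	p.Release() // close: subsequent Submit calls return ErrPoolClosed
//	p.Reboot()  // reopen the pool for use
//	_ = p.Submit(func() {})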