use the "container/list" to keep workers

Andy Pan 2018-05-19 19:08:31 +08:00
parent 2929cede54
commit 0fd8ba8dae
4 changed files with 65 additions and 63 deletions
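
This commit replaces the pool's sync.Pool-backed task/worker storage with a mutex-guarded queue built on container/list and comments out the WaitGroup-based Wait(). As a quick orientation, here is a minimal caller-side sketch using only the API visible in this diff (NewPool, Push, Running, Free, Destroy); the import path is an assumption, and since Wait() is disabled by this commit the sketch simply sleeps before reading the counters.

package main

import (
	"fmt"
	"time"

	"github.com/panjf2000/ants" // import path assumed; adjust to where this package actually lives
)

func main() {
	p := ants.NewPool(10)
	defer p.Destroy()

	for i := 0; i < 100; i++ {
		i := i // capture the loop variable for the closure
		_ = p.Push(func() {
			fmt.Println("task", i)
		})
	}

	// Wait() is commented out in this commit, so pause briefly instead of
	// blocking until all tasks finish.
	time.Sleep(time.Second)
	fmt.Println("running:", p.Running(), "free:", p.Free())
}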

ants.go

@@ -22,6 +22,6 @@ func Free() int {
 	return defaultPool.Free()
 }

-func Wait() {
-	defaultPool.Wait()
-}
+//func Wait() {
+//	defaultPool.Wait()
+//}

ants_test.go

@@ -7,15 +7,19 @@ import (
 	"runtime"
 )

-var n = 10
+var n = 100000

 func demoFunc() {
-	var n int
-	for i := 0; i < 10000; i++ {
-		n += i
+	for i := 0; i < 1000000; i++ {
 	}
-	fmt.Printf("finish task with result:%d\n", n)
 }

+//func demoFunc() {
+//	var n int
+//	for i := 0; i < 10000; i++ {
+//		n += i
+//	}
+//	fmt.Printf("finish task with result:%d\n", n)
+//}
+
 func TestDefaultPool(t *testing.T) {
 	for i := 0; i < n; i++ {

@@ -26,7 +30,7 @@ func TestDefaultPool(t *testing.T) {
 	t.Logf("running workers number:%d", ants.Running())
 	t.Logf("free workers number:%d", ants.Free())
-	ants.Wait()
+	//ants.Wait()
 	mem := runtime.MemStats{}
 	runtime.ReadMemStats(&mem)

pool.go

@@ -11,33 +11,27 @@ type sig struct{}

 type f func()

-//type er interface{}
-
 type Pool struct {
 	capacity     int32
 	running      int32
-	//tasks   chan er
-	//workers chan er
-	tasks        *sync.Pool
-	workers      *sync.Pool
+	tasks        *ConcurrentQueue
+	workers      *ConcurrentQueue
 	freeSignal   chan sig
 	launchSignal chan sig
 	destroy      chan sig
 	m            *sync.Mutex
-	wg           *sync.WaitGroup
+	//wg           *sync.WaitGroup
 }

 func NewPool(size int) *Pool {
 	p := &Pool{
 		capacity:     int32(size),
-		//tasks:   make(chan er, size),
-		//workers: make(chan er, size),
-		tasks:        &sync.Pool{},
-		workers:      &sync.Pool{},
+		tasks:        NewConcurrentQueue(),
+		workers:      NewConcurrentQueue(),
 		freeSignal:   make(chan sig, math.MaxInt32),
 		launchSignal: make(chan sig, math.MaxInt32),
 		destroy:      make(chan sig, runtime.GOMAXPROCS(-1)),
-		wg:           &sync.WaitGroup{},
+		//wg:           &sync.WaitGroup{},
 	}
 	p.loop()
 	return p

@@ -51,7 +45,7 @@ func (p *Pool) loop() {
 		for {
 			select {
 			case <-p.launchSignal:
-				p.getWorker().sendTask(p.tasks.Get().(f))
+				p.getWorker().sendTask(p.tasks.pop().(f))
 			case <-p.destroy:
 				return
 			}

@@ -64,10 +58,9 @@ func (p *Pool) Push(task f) error {
 	if len(p.destroy) > 0 {
 		return nil
 	}
-	//p.tasks <- task
-	p.tasks.Put(task)
+	p.tasks.push(task)
 	p.launchSignal <- sig{}
-	p.wg.Add(1)
+	//p.wg.Add(1)
 	return nil
 }

 func (p *Pool) Running() int {

@@ -82,9 +75,9 @@ func (p *Pool) Cap() int {
 	return int(atomic.LoadInt32(&p.capacity))
 }

-func (p *Pool) Wait() {
-	p.wg.Wait()
-}
+//func (p *Pool) Wait() {
+//	p.wg.Wait()
+//}

 func (p *Pool) Destroy() error {
 	p.m.Lock()

@@ -115,42 +108,16 @@ func (p *Pool) newWorker() *Worker {
 	return worker
 }

-//func (p *Pool) newWorker() *Worker {
-//	worker := &Worker{
-//		pool: p,
-//		task: make(chan f),
-//		exit: make(chan sig),
-//	}
-//	worker.run()
-//	return worker
-//}
-
-//func (p *Pool) getWorker() *Worker {
-//	defer atomic.AddInt32(&p.running, 1)
-//	var worker *Worker
-//	if p.reachLimit() {
-//		worker = (<-p.workers).(*Worker)
-//	} else {
-//		select {
-//		case w := <-p.workers:
-//			return w.(*Worker)
-//		default:
-//			worker = p.newWorker()
-//		}
-//	}
-//	return worker
-//}
-
 func (p *Pool) getWorker() *Worker {
 	defer atomic.AddInt32(&p.running, 1)
-	if w := p.workers.Get(); w != nil {
+	if w := p.workers.pop(); w != nil {
 		return w.(*Worker)
 	}
 	return p.newWorker()
 }

 func (p *Pool) PutWorker(worker *Worker) {
-	p.workers.Put(worker)
+	p.workers.push(worker)
 	if p.reachLimit() {
 		p.freeSignal <- sig{}
 	}
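
The hunks above change how Push() and loop() hand tasks around: Push() now stores the task in the tasks queue and sends a launch signal, and loop() pops the task and passes it to a worker via getWorker().sendTask(). A condensed sketch of that flow, written as a hypothetical helper assumed to sit in the same package (it reuses sig, f and the ConcurrentQueue added in worker.go below) and running the task inline rather than through a worker:

func dispatchSketch() {
	tasks := NewConcurrentQueue()
	launchSignal := make(chan sig, 8)

	// Push side: enqueue the task, then signal that one is available.
	tasks.push(f(func() { println("hello from a task") }))
	launchSignal <- sig{}

	// loop side: each signal means at least one queued task; pop and run it
	// (the real loop hands it to getWorker().sendTask instead of calling it).
	<-launchSignal
	if task, ok := tasks.pop().(f); ok {
		task()
	}
}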

worker.go

@@ -1,6 +1,10 @@
 package ants

-import "sync/atomic"
+import (
+	"sync/atomic"
+	"container/list"
+	"sync"
+)

 type Worker struct {
 	pool *Pool

@@ -14,9 +18,8 @@ func (w *Worker) run() {
 			select {
 			case f := <-w.task:
 				f()
-				//w.pool.workers <- w
-				w.pool.workers.Put(w)
-				w.pool.wg.Done()
+				w.pool.workers.push(w)
+				//w.pool.wg.Done()
 			case <-w.exit:
 				atomic.AddInt32(&w.pool.running, -1)
 				return

@@ -32,3 +35,31 @@ func (w *Worker) stop() {
 func (w *Worker) sendTask(task f) {
 	w.task <- task
 }
+
+//--------------------------------------------------------------------------------
+
+type ConcurrentQueue struct {
+	queue *list.List
+	m     sync.Mutex
+}
+
+func NewConcurrentQueue() *ConcurrentQueue {
+	q := new(ConcurrentQueue)
+	q.queue = list.New()
+	return q
+}
+
+func (q *ConcurrentQueue) push(v interface{}) {
+	defer q.m.Unlock()
+	q.m.Lock()
+	q.queue.PushFront(v)
+}
+
+func (q *ConcurrentQueue) pop() interface{} {
+	defer q.m.Unlock()
+	q.m.Lock()
+	if elem := q.queue.Back(); elem != nil {
+		return q.queue.Remove(elem)
+	}
+	return nil
+}
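
The ConcurrentQueue added above pushes at the front of the list and pops from the back, so workers and tasks come out in FIFO order, and pop() returns nil once the queue is empty. A small sketch of those semantics, as a hypothetical helper assumed to live in the same package:

func queueOrderSketch() {
	q := NewConcurrentQueue()
	q.push("first")
	q.push("second")
	println(q.pop().(string)) // "first"  (oldest item comes out first)
	println(q.pop().(string)) // "second"
	println(q.pop() == nil)   // true: an empty queue yields nil
}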