From ddcdb57b63d63b821f23125bb940d5b631b7014b Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Sun, 20 May 2018 16:22:56 +0800
Subject: [PATCH] pass the benchmark test

---
 ants_benchmark_test.go |  2 +-
 ants_test.go           | 41 +++++++++++++--------
 pool.go                | 84 ------------------------------------------
 3 files changed, 26 insertions(+), 101 deletions(-)

diff --git a/ants_benchmark_test.go b/ants_benchmark_test.go
index 274ba1c..5ce9e2b 100644
--- a/ants_benchmark_test.go
+++ b/ants_benchmark_test.go
@@ -6,7 +6,7 @@ import (
 	"sync"
 )
 
-const RunTimes = 10000000
+const RunTimes = 1000000
 
 func BenchmarkGoroutine(b *testing.B) {
 	for i := 0; i < b.N; i++ {
diff --git a/ants_test.go b/ants_test.go
index a8b79a6..e349a6d 100644
--- a/ants_test.go
+++ b/ants_test.go
@@ -4,8 +4,8 @@ import (
 	"testing"
 	"github.com/panjf2000/ants"
 	"sync"
-	"runtime"
 	"time"
+	"runtime"
 )
 
 var n = 1000000
@@ -35,6 +35,7 @@ func TestNoPool(t *testing.T) {
 		wg.Add(1)
 		go func() {
 			forSleep()
+			//demoFunc()
 			wg.Done()
 		}()
 	}
@@ -51,31 +52,39 @@ func TestDefaultPool(t *testing.T) {
 		wg.Add(1)
 		ants.Push(func() {
 			forSleep()
+			//demoFunc()
 			wg.Done()
 		})
 	}
 	wg.Wait()
 
 	//t.Logf("pool capacity:%d", ants.Cap())
-	//t.Logf("running workers number:%d", ants.Running())
 	//t.Logf("free workers number:%d", ants.Free())
+	t.Logf("running workers number:%d", ants.Running())
 
 	mem := runtime.MemStats{}
 	runtime.ReadMemStats(&mem)
 	t.Logf("memory usage:%d", mem.TotalAlloc/1024)
 }
 
-//func TestCustomPool(t *testing.T) {
-//	p := ants.NewPool(1000)
-//	for i := 0; i < n; i++ {
-//		p.Push(demoFunc)
-//	}
-//
-//	t.Logf("pool capacity:%d", p.Cap())
-//	t.Logf("running workers number:%d", p.Running())
-//	t.Logf("free workers number:%d", p.Free())
-//
-//	mem := runtime.MemStats{}
-//	runtime.ReadMemStats(&mem)
-//
-//}
+func TestCustomPool(t *testing.T) {
+	p := ants.NewPool(30000)
+	var wg sync.WaitGroup
+	for i := 0; i < n; i++ {
+		wg.Add(1)
+		p.Push(func() {
+			forSleep()
+			//demoFunc()
+			wg.Done()
+		})
+	}
+	wg.Wait()
+
+	//t.Logf("pool capacity:%d", p.Cap())
+	//t.Logf("free workers number:%d", p.Free())
+
+	t.Logf("running workers number:%d", p.Running())
+	mem := runtime.MemStats{}
+	runtime.ReadMemStats(&mem)
+	t.Logf("memory usage:%d", mem.TotalAlloc/1024)
+}
diff --git a/pool.go b/pool.go
index a7ee612..83e80ec 100644
--- a/pool.go
+++ b/pool.go
@@ -32,20 +32,6 @@ func NewPool(size int) *Pool {
 }
 
 //-------------------------------------------------------------------------
-//func (p *Pool) loop() {
-//	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
-//		go func() {
-//			for {
-//				select {
-//				case <-p.launchSignal:
-//					p.getWorker().sendTask(p.tasks.pop().(f))
-//				case <-p.destroy:
-//					return
-//				}
-//			}
-//		}()
-//	}
-//}
 
 func (p *Pool) Push(task f) error {
 	if len(p.destroy) > 0 {
@@ -79,70 +65,7 @@ func (p *Pool) Destroy() error {
 
 //-------------------------------------------------------------------------
 
-func (p *Pool) reachLimit() bool {
-	return p.Running() >= p.Cap()
-}
-
-//func (p *Pool) newWorker() *Worker {
-//	var w *Worker
-//	if p.reachLimit() {
-//		<-p.freeSignal
-//		return p.getWorker()
-//	}
-//	wp := p.workerPool.Get()
-//	if wp == nil {
-//		w = &Worker{
-//			pool: p,
-//			task: make(chan f),
-//		}
-//	} else {
-//		w = wp.(*Worker)
-//	}
-//	w.run()
-//	atomic.AddInt32(&p.running, 1)
-//	return w
-//}
-//
-//func (p *Pool) getWorker() *Worker {
-//	var w *Worker
-//	p.lock.Lock()
-//	workers := p.workers
-//	n := len(workers) - 1
-//	if n < 0 {
-//		p.lock.Unlock()
-//		return p.newWorker()
-//	} else {
-//		w = workers[n]
-//		workers[n] = nil
-//		p.workers = workers[:n]
-//		//atomic.AddInt32(&p.running, 1)
-//	}
-//	p.lock.Unlock()
-//	return w
-//}
-
-//func (p *Pool) newWorker() *Worker {
-//	var w *Worker
-//	if p.reachLimit() {
-//		<-p.freeSignal
-//		return p.getWorker()
-//	}
-//	wp := p.workerPool.Get()
-//	if wp == nil {
-//		w = &Worker{
-//			pool: p,
-//			task: make(chan f),
-//		}
-//	} else {
-//		w = wp.(*Worker)
-//	}
-//	w.run()
-//	atomic.AddInt32(&p.running, 1)
-//	return w
-//}
-
 func (p *Pool) getWorker() *Worker {
-	//fmt.Printf("init running workers number:%d\n", p.running)
 	var w *Worker
 	waiting := false
 
@@ -150,7 +73,6 @@ func (p *Pool) getWorker() *Worker {
 	workers := p.workers
 	n := len(workers) - 1
 	if n < 0 {
-		//fmt.Printf("running workers number:%d\n", p.running)
 		if p.running >= p.capacity {
 			waiting = true
 		}
@@ -158,15 +80,11 @@
 		w = workers[n]
 		workers[n] = nil
 		p.workers = workers[:n]
-		//atomic.AddInt32(&p.running, 1)
 	}
 	p.lock.Unlock()
 
 	if waiting {
 		<-p.freeSignal
-		//p.lock.Lock()
-		//fmt.Println("wait for a worker")
-		//fmt.Println("get for a worker")
 		for {
 			p.lock.Lock()
 			workers = p.workers
@@ -181,7 +99,6 @@
 			p.lock.Unlock()
 			break
 		}
-		//p.lock.Unlock()
 	} else {
 		wp := p.workerPool.Get()
 		if wp == nil {
@@ -204,5 +121,4 @@ func (p *Pool) putWorker(worker *Worker) {
 	p.workers = append(p.workers, worker)
 	p.lock.Unlock()
 	p.freeSignal <- sig{}
-	//fmt.Printf("put a worker, running worker number:%d\n", p.Running())
 }
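
For reference, a minimal standalone sketch of the usage pattern that the re-enabled TestCustomPool exercises, assuming the ants API exactly as it appears in this patch (ants.NewPool, Push, Running, Destroy); the capacity of 1000, the loop count, and the sleeping task body are illustrative placeholders, not values from the patch:

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/panjf2000/ants"
)

func main() {
	// Capacity-bounded pool: once the number of running workers
	// reaches the cap, Push blocks inside getWorker() on freeSignal
	// until putWorker() returns a worker to the free list.
	p := ants.NewPool(1000) // illustrative capacity

	var wg sync.WaitGroup
	for i := 0; i < 100000; i++ {
		wg.Add(1)
		p.Push(func() {
			time.Sleep(time.Millisecond) // stand-in for real work
			wg.Done()
		})
	}
	wg.Wait()

	fmt.Printf("running workers number:%d\n", p.Running())
	p.Destroy()
}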