pass the benchmark test

commit ddcdb57b63 (parent d24d6020c3)
Author: Andy Pan
Date:   2018-05-20 16:22:56 +08:00

3 changed files with 26 additions and 101 deletions

@@ -6,7 +6,7 @@ import (
 	"sync"
 )
 
-const RunTimes = 10000000
+const RunTimes = 1000000
 
 func BenchmarkGoroutine(b *testing.B) {
 	for i := 0; i < b.N; i++ {
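For reference, the benchmark that RunTimes drives takes roughly the following shape. This is a minimal sketch reconstructed from the hunk's context lines; the package name and the demoFunc body are assumptions standing in for what the real file defines:

package ants_test // assumed package name for an external test file

import (
	"sync"
	"testing"
)

const RunTimes = 1000000

// demoFunc is a stand-in for the file's real task function.
func demoFunc() {}

func BenchmarkGoroutine(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup
		// Baseline: spawn one raw goroutine per task, to compare
		// against the pooled submission path benchmarked elsewhere.
		for j := 0; j < RunTimes; j++ {
			wg.Add(1)
			go func() {
				demoFunc()
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

Dropping RunTimes from 10,000,000 to 1,000,000 keeps each b.N iteration short enough for the benchmark to finish in reasonable time.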

@@ -4,8 +4,8 @@ import (
 	"testing"
 	"github.com/panjf2000/ants"
 	"sync"
-	"runtime"
 	"time"
+	"runtime"
 )
 
 var n = 1000000
@@ -35,6 +35,7 @@ func TestNoPool(t *testing.T) {
 		wg.Add(1)
 		go func() {
 			forSleep()
+			//demoFunc()
 			wg.Done()
 		}()
 	}
@@ -51,31 +52,39 @@ func TestDefaultPool(t *testing.T) {
 		wg.Add(1)
 		ants.Push(func() {
 			forSleep()
+			//demoFunc()
 			wg.Done()
 		})
 	}
 	wg.Wait()
 
 	//t.Logf("pool capacity:%d", ants.Cap())
-	//t.Logf("running workers number:%d", ants.Running())
 	//t.Logf("free workers number:%d", ants.Free())
+	t.Logf("running workers number:%d", ants.Running())
 	mem := runtime.MemStats{}
 	runtime.ReadMemStats(&mem)
 	t.Logf("memory usage:%d", mem.TotalAlloc/1024)
 }
 
-//func TestCustomPool(t *testing.T) {
-//	p := ants.NewPool(1000)
-//	for i := 0; i < n; i++ {
-//		p.Push(demoFunc)
-//	}
-//
-//	//t.Logf("pool capacity:%d", p.Cap())
-//	// t.Logf("running workers number:%d", p.Running())
-//	//t.Logf("free workers number:%d", p.Free())
-//
-//	mem := runtime.MemStats{}
-//	runtime.ReadMemStats(&mem)
-//}
+func TestCustomPool(t *testing.T) {
+	p := ants.NewPool(30000)
+	var wg sync.WaitGroup
+	for i := 0; i < n; i++ {
+		wg.Add(1)
+		p.Push(func() {
+			forSleep()
+			//demoFunc()
+			wg.Done()
+		})
+	}
+	wg.Wait()
+
+	//t.Logf("pool capacity:%d", p.Cap())
+	//t.Logf("free workers number:%d", p.Free())
+
+	t.Logf("running workers number:%d", p.Running())
+	mem := runtime.MemStats{}
+	runtime.ReadMemStats(&mem)
+	t.Logf("memory usage:%d", mem.TotalAlloc/1024)
+}
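Outside the test harness, the custom-pool API that TestCustomPool exercises can be used along these lines. This is a hedged sketch against the signatures visible in this commit, where NewPool(size int) returns a *Pool and Push(task f) error submits a task; the main function, the task body, and the counts are illustrative only:

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/panjf2000/ants"
)

func main() {
	// Size the pool well below the task count so workers are reused;
	// 30000 mirrors the capacity chosen in TestCustomPool.
	p := ants.NewPool(30000)

	var wg sync.WaitGroup
	for i := 0; i < 1000000; i++ {
		wg.Add(1)
		p.Push(func() {
			time.Sleep(time.Millisecond) // stand-in for real work, like forSleep
			wg.Done()
		})
	}
	wg.Wait()

	fmt.Printf("running workers number:%d\n", p.Running())
}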

pool.go

@@ -32,20 +32,6 @@ func NewPool(size int) *Pool {
 }
 
 //-------------------------------------------------------------------------
 
-//func (p *Pool) loop() {
-//	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
-//		go func() {
-//			for {
-//				select {
-//				case <-p.launchSignal:
-//					p.getWorker().sendTask(p.tasks.pop().(f))
-//				case <-p.destroy:
-//					return
-//				}
-//			}
-//		}()
-//	}
-//}
 
 func (p *Pool) Push(task f) error {
 	if len(p.destroy) > 0 {
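The deleted loop records the design this commit moves away from: a fixed set of dispatcher goroutines pulling tasks off a shared queue and forwarding them to workers, shut down via a destroy channel. As a generic illustration of that dispatcher pattern (a sketch with invented names, not the library's code):

package main

import "time"

// dispatch starts n dispatcher goroutines that run queued tasks until
// the destroy channel is closed, mirroring the select loop deleted
// above (the real code handed each task to a pooled worker instead).
func dispatch(tasks <-chan func(), destroy <-chan struct{}, n int) {
	for i := 0; i < n; i++ {
		go func() {
			for {
				select {
				case task := <-tasks:
					task()
				case <-destroy:
					return
				}
			}
		}()
	}
}

func main() {
	tasks := make(chan func())
	destroy := make(chan struct{})
	dispatch(tasks, destroy, 4)

	done := make(chan struct{})
	tasks <- func() { close(done) } // queue one task and wait for it
	<-done

	close(destroy)
	time.Sleep(10 * time.Millisecond) // let dispatchers observe the shutdown
}

After this commit, only the direct path remains: Push acquires a worker via getWorker and sends the task to it, with no intermediate queue.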
@@ -79,70 +65,7 @@ func (p *Pool) Destroy() error {
 
 //-------------------------------------------------------------------------
 
-func (p *Pool) reachLimit() bool {
-	return p.Running() >= p.Cap()
-}
-
-//func (p *Pool) newWorker() *Worker {
-//	var w *Worker
-//	if p.reachLimit() {
-//		<-p.freeSignal
-//		return p.getWorker()
-//	}
-//	wp := p.workerPool.Get()
-//	if wp == nil {
-//		w = &Worker{
-//			pool: p,
-//			task: make(chan f),
-//		}
-//	} else {
-//		w = wp.(*Worker)
-//	}
-//	w.run()
-//	atomic.AddInt32(&p.running, 1)
-//	return w
-//}
-//
-//func (p *Pool) getWorker() *Worker {
-//	var w *Worker
-//	p.lock.Lock()
-//	workers := p.workers
-//	n := len(workers) - 1
-//	if n < 0 {
-//		p.lock.Unlock()
-//		return p.newWorker()
-//	} else {
-//		w = workers[n]
-//		workers[n] = nil
-//		p.workers = workers[:n]
-//		//atomic.AddInt32(&p.running, 1)
-//	}
-//	p.lock.Unlock()
-//	return w
-//}
-
-//func (p *Pool) newWorker() *Worker {
-//	var w *Worker
-//	if p.reachLimit() {
-//		<-p.freeSignal
-//		return p.getWorker()
-//	}
-//	wp := p.workerPool.Get()
-//	if wp == nil {
-//		w = &Worker{
-//			pool: p,
-//			task: make(chan f),
-//		}
-//	} else {
-//		w = wp.(*Worker)
-//	}
-//	w.run()
-//	atomic.AddInt32(&p.running, 1)
-//	return w
-//}
-
 func (p *Pool) getWorker() *Worker {
-	//fmt.Printf("init running workers number:%d\n", p.running)
 	var w *Worker
 	waiting := false
@@ -150,7 +73,6 @@ func (p *Pool) getWorker() *Worker {
 	workers := p.workers
 	n := len(workers) - 1
 	if n < 0 {
-		//fmt.Printf("running workers number:%d\n", p.running)
 		if p.running >= p.capacity {
 			waiting = true
 		}
@@ -158,15 +80,11 @@ func (p *Pool) getWorker() *Worker {
 		w = workers[n]
 		workers[n] = nil
 		p.workers = workers[:n]
-		//atomic.AddInt32(&p.running, 1)
 	}
 	p.lock.Unlock()
 
 	if waiting {
 		<-p.freeSignal
-		//p.lock.Lock()
-		//fmt.Println("wait for a worker")
-		//fmt.Println("get for a worker")
 		for {
 			p.lock.Lock()
 			workers = p.workers
@@ -181,7 +99,6 @@ func (p *Pool) getWorker() *Worker {
 			p.lock.Unlock()
 			break
 		}
-		//p.lock.Unlock()
 	} else {
 		wp := p.workerPool.Get()
 		if wp == nil {
@@ -204,5 +121,4 @@ func (p *Pool) putWorker(worker *Worker) {
 	p.workers = append(p.workers, worker)
 	p.lock.Unlock()
 	p.freeSignal <- sig{}
-	//fmt.Printf("put a worker, running worker number:%d\n", p.Running())
 }
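What remains after the cleanup is a single handshake: getWorker blocks on p.freeSignal when the running count has reached capacity, and putWorker sends on that channel right after appending the worker back to the free list. Below is a self-contained sketch of the same signaling idea, using simplified invented types rather than the library's own; the buffered channel plus a non-blocking send is this sketch's simplification, not necessarily how the real pool sizes its channel:

package main

import (
	"fmt"
	"sync"
)

// miniPool mirrors the diff's handshake: acquirers block on freeSignal
// when at capacity; each release sends one signal after freeing a slot.
type miniPool struct {
	lock       sync.Mutex
	running    int
	capacity   int
	freeSignal chan struct{}
}

func newMiniPool(capacity int) *miniPool {
	return &miniPool{capacity: capacity, freeSignal: make(chan struct{}, capacity)}
}

func (p *miniPool) acquire() {
	for {
		p.lock.Lock()
		if p.running < p.capacity {
			p.running++
			p.lock.Unlock()
			return
		}
		p.lock.Unlock()
		<-p.freeSignal // wait, as getWorker does, then re-check under the lock
	}
}

func (p *miniPool) release() {
	p.lock.Lock()
	p.running--
	p.lock.Unlock()
	select {
	case p.freeSignal <- struct{}{}: // wake one blocked acquirer, as putWorker does
	default: // buffer already full of pending signals; waiters will drain those
	}
}

func main() {
	p := newMiniPool(2)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			p.acquire()
			fmt.Println("task", id, "holding a worker slot")
			p.release()
			wg.Done()
		}(i)
	}
	wg.Wait()
}

The re-check loop in acquire matches the for loop getWorker keeps after receiving from freeSignal: a signal only means a worker was returned, not that this particular caller will win it.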