forked from mirror/ants

pass the benchmark test

parent d24d6020c3
commit ddcdb57b63
@@ -6,7 +6,7 @@ import (
 	"sync"
 )
 
-const RunTimes = 10000000
+const RunTimes = 1000000
 
 func BenchmarkGoroutine(b *testing.B) {
 	for i := 0; i < b.N; i++ {
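The only change in this hunk is RunTimes dropping from 10,000,000 to 1,000,000, which matches the commit message: presumably one million tasks per iteration lets the goroutine-per-task benchmark finish within go test's time limits. Only the signature and loop header of BenchmarkGoroutine are visible here; the sketch below shows the usual shape of such a benchmark, where the demoFunc workload and the WaitGroup bookkeeping are assumptions, not lines from this commit:

    func BenchmarkGoroutine(b *testing.B) {
    	for i := 0; i < b.N; i++ {
    		var wg sync.WaitGroup
    		for j := 0; j < RunTimes; j++ {
    			wg.Add(1)
    			go func() {
    				demoFunc() // assumed workload; not visible in this diff
    				wg.Done()
    			}()
    		}
    		wg.Wait() // one b.N iteration = RunTimes goroutines run to completion
    	}
    }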
ants_test.go (37 changed lines)
@@ -4,8 +4,8 @@ import (
 	"testing"
 	"github.com/panjf2000/ants"
 	"sync"
-	"runtime"
 	"time"
+	"runtime"
 )
 
 var n = 1000000
@@ -35,6 +35,7 @@ func TestNoPool(t *testing.T) {
 		wg.Add(1)
 		go func() {
+			forSleep()
 			//demoFunc()
 			wg.Done()
 		}()
 	}
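The tests now call forSleep() in place of demoFunc(). forSleep's body is not part of this diff; consistent with the package's "time" import, a plausible stand-in is a fixed sleep simulating blocking work:

    // Hypothetical definition; the real body is not shown in this commit.
    func forSleep() {
    	time.Sleep(time.Millisecond) // assumed duration
    }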
@@ -51,31 +52,39 @@ func TestDefaultPool(t *testing.T) {
 		wg.Add(1)
 		ants.Push(func() {
+			forSleep()
 			//demoFunc()
 			wg.Done()
 		})
 	}
 	wg.Wait()
 
 	//t.Logf("pool capacity:%d", ants.Cap())
 	//t.Logf("running workers number:%d", ants.Running())
 	//t.Logf("free workers number:%d", ants.Free())
 
+	t.Logf("running workers number:%d", ants.Running())
+	mem := runtime.MemStats{}
+	runtime.ReadMemStats(&mem)
+	t.Logf("memory usage:%d", mem.TotalAlloc/1024)
 }
 
-//func TestCustomPool(t *testing.T) {
-//	p := ants.NewPool(1000)
-//	for i := 0; i < n; i++ {
-//		p.Push(demoFunc)
-//	}
-//
+func TestCustomPool(t *testing.T) {
+	p := ants.NewPool(30000)
+	var wg sync.WaitGroup
+	for i := 0; i < n; i++ {
+		wg.Add(1)
+		p.Push(func() {
+			forSleep()
+			//demoFunc()
+			wg.Done()
+		})
+	}
+	wg.Wait()
 
 	//t.Logf("pool capacity:%d", p.Cap())
-	// t.Logf("running workers number:%d", p.Running())
 	//t.Logf("free workers number:%d", p.Free())
-	//
-	// mem := runtime.MemStats{}
-	// runtime.ReadMemStats(&mem)
-	//
-//}
 
+	t.Logf("running workers number:%d", p.Running())
+	mem := runtime.MemStats{}
+	runtime.ReadMemStats(&mem)
+	t.Logf("memory usage:%d", mem.TotalAlloc/1024)
 }
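Both TestDefaultPool and TestCustomPool now end by logging mem.TotalAlloc/1024. TotalAlloc is the cumulative count of bytes allocated for heap objects since the process started; it only grows, so the figure is total allocation in KB, not live heap, which makes it a fair one-shot comparison against TestNoPool. A hypothetical helper capturing the shared pattern (not part of the commit):

    func logMemUsage(t *testing.T) {
    	mem := runtime.MemStats{}
    	runtime.ReadMemStats(&mem)
    	t.Logf("memory usage:%d KB", mem.TotalAlloc/1024)
    }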
pool.go (84 changed lines)
@@ -32,20 +32,6 @@ func NewPool(size int) *Pool {
 }
 
-//-------------------------------------------------------------------------
-//func (p *Pool) loop() {
-//	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
-//		go func() {
-//			for {
-//				select {
-//				case <-p.launchSignal:
-//					p.getWorker().sendTask(p.tasks.pop().(f))
-//				case <-p.destroy:
-//					return
-//				}
-//			}
-//		}()
-//	}
-//}
-
 func (p *Pool) Push(task f) error {
 	if len(p.destroy) > 0 {
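The commented-out loop() removed here sketches an earlier dispatcher design: one goroutine per GOMAXPROCS draining a shared task queue on launchSignal, with a destroy channel for shutdown. The surviving code instead hands each pushed task straight to a worker. A self-contained sketch of the deleted pattern, with a plain buffered channel standing in for p.tasks plus p.launchSignal (everything below is illustrative, not ants code):

    package main

    import (
    	"fmt"
    	"runtime"
    	"sync"
    )

    func main() {
    	tasks := make(chan func(), 128) // stand-in for p.tasks + p.launchSignal
    	destroy := make(chan struct{})  // stand-in for p.destroy
    	for i := 0; i < runtime.GOMAXPROCS(-1); i++ {
    		go func() {
    			for {
    				select {
    				case task := <-tasks:
    					task()
    				case <-destroy:
    					return
    				}
    			}
    		}()
    	}
    	var wg sync.WaitGroup
    	for i := 0; i < 10; i++ {
    		wg.Add(1)
    		n := i
    		tasks <- func() { fmt.Println("task", n); wg.Done() }
    	}
    	wg.Wait()
    	close(destroy) // stop all dispatcher goroutines
    }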
@@ -79,70 +65,7 @@ func (p *Pool) Destroy() error {
 
 //-------------------------------------------------------------------------
 
 func (p *Pool) reachLimit() bool {
 	return p.Running() >= p.Cap()
 }
 
-//func (p *Pool) newWorker() *Worker {
-//	var w *Worker
-//	if p.reachLimit() {
-//		<-p.freeSignal
-//		return p.getWorker()
-//	}
-//	wp := p.workerPool.Get()
-//	if wp == nil {
-//		w = &Worker{
-//			pool: p,
-//			task: make(chan f),
-//		}
-//	} else {
-//		w = wp.(*Worker)
-//	}
-//	w.run()
-//	atomic.AddInt32(&p.running, 1)
-//	return w
-//}
-//
-//func (p *Pool) getWorker() *Worker {
-//	var w *Worker
-//	p.lock.Lock()
-//	workers := p.workers
-//	n := len(workers) - 1
-//	if n < 0 {
-//		p.lock.Unlock()
-//		return p.newWorker()
-//	} else {
-//		w = workers[n]
-//		workers[n] = nil
-//		p.workers = workers[:n]
-//		//atomic.AddInt32(&p.running, 1)
-//	}
-//	p.lock.Unlock()
-//	return w
-//}
-
-//func (p *Pool) newWorker() *Worker {
-//	var w *Worker
-//	if p.reachLimit() {
-//		<-p.freeSignal
-//		return p.getWorker()
-//	}
-//	wp := p.workerPool.Get()
-//	if wp == nil {
-//		w = &Worker{
-//			pool: p,
-//			task: make(chan f),
-//		}
-//	} else {
-//		w = wp.(*Worker)
-//	}
-//	w.run()
-//	atomic.AddInt32(&p.running, 1)
-//	return w
-//}
-
 func (p *Pool) getWorker() *Worker {
-	//fmt.Printf("init running workers number:%d\n", p.running)
 	var w *Worker
 	waiting := false
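reachLimit compares Running() against Cap(), while the live getWorker body below reads p.running and p.capacity directly under the pool lock. Neither accessor appears in this commit; given that the deleted code bumps the counter with atomic.AddInt32(&p.running, 1), plausible definitions would be:

    // Assumed accessors; the int32 field types are inferred from the
    // atomic.AddInt32 call in the removed code, not shown in this diff.
    func (p *Pool) Running() int {
    	return int(atomic.LoadInt32(&p.running))
    }

    func (p *Pool) Cap() int {
    	return int(atomic.LoadInt32(&p.capacity))
    }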
@@ -150,7 +73,6 @@ func (p *Pool) getWorker() *Worker {
 	workers := p.workers
 	n := len(workers) - 1
 	if n < 0 {
-		//fmt.Printf("running workers number:%d\n", p.running)
 		if p.running >= p.capacity {
 			waiting = true
 		}
@@ -158,15 +80,11 @@ func (p *Pool) getWorker() *Worker {
 		w = workers[n]
 		workers[n] = nil
 		p.workers = workers[:n]
-		//atomic.AddInt32(&p.running, 1)
 	}
 	p.lock.Unlock()
 
 	if waiting {
 		<-p.freeSignal
-		//p.lock.Lock()
-		//fmt.Println("wait for a worker")
-		//fmt.Println("get for a worker")
 		for {
 			p.lock.Lock()
 			workers = p.workers
@@ -181,7 +99,6 @@ func (p *Pool) getWorker() *Worker {
 			p.lock.Unlock()
 			break
 		}
-		//p.lock.Unlock()
 	} else {
 		wp := p.workerPool.Get()
 		if wp == nil {
@@ -204,5 +121,4 @@ func (p *Pool) putWorker(worker *Worker) {
 	p.workers = append(p.workers, worker)
 	p.lock.Unlock()
 	p.freeSignal <- sig{}
-	//fmt.Printf("put a worker, running worker number:%d\n", p.Running())
 }
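Taken together, the surviving getWorker/putWorker pair implement a capacity-bounded handoff: getWorker pops a free worker under the lock, creates a new one while under capacity, and otherwise blocks on freeSignal and retries the pop; putWorker returns a worker to the free list and sends one sig to wake a blocked getWorker. Below is a self-contained, simplified sketch of that handshake. Field names mirror the diff; the types and the demo around them are illustrative, and the sync.Pool the real code draws workers from (p.workerPool) is replaced by direct construction to keep the sketch short:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    type sig struct{}

    type worker struct{ id int }

    type pool struct {
    	lock       sync.Mutex
    	workers    []*worker // free list, popped LIFO as in the diff
    	running    int
    	capacity   int
    	freeSignal chan sig
    }

    // getWorker: pop a free worker; create one while under capacity;
    // otherwise block on freeSignal and retry the pop.
    func (p *pool) getWorker() *worker {
    	p.lock.Lock()
    	if n := len(p.workers) - 1; n >= 0 {
    		w := p.workers[n]
    		p.workers[n] = nil // drop the reference from the slice
    		p.workers = p.workers[:n]
    		p.lock.Unlock()
    		return w
    	}
    	if p.running < p.capacity {
    		p.running++
    		w := &worker{id: p.running}
    		p.lock.Unlock()
    		return w
    	}
    	p.lock.Unlock()
    	for {
    		<-p.freeSignal // wait for putWorker to return a worker
    		p.lock.Lock()
    		if n := len(p.workers) - 1; n >= 0 {
    			w := p.workers[n]
    			p.workers[n] = nil
    			p.workers = p.workers[:n]
    			p.lock.Unlock()
    			return w
    		}
    		p.lock.Unlock() // another waiter won the race; wait again
    	}
    }

    // putWorker appends to the free list first, then signals, so the
    // worker is already visible when a waiter wakes up.
    func (p *pool) putWorker(w *worker) {
    	p.lock.Lock()
    	p.workers = append(p.workers, w)
    	p.lock.Unlock()
    	p.freeSignal <- sig{}
    }

    func main() {
    	p := &pool{capacity: 2, freeSignal: make(chan sig, 4)}
    	var wg sync.WaitGroup
    	for i := 0; i < 5; i++ {
    		wg.Add(1)
    		go func(n int) {
    			defer wg.Done()
    			w := p.getWorker()
    			time.Sleep(10 * time.Millisecond) // stand-in for running a task
    			fmt.Printf("task %d ran on worker %d\n", n, w.id)
    			p.putWorker(w)
    		}(i)
    	}
    	wg.Wait()
    }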