Merge branch 'develop'

Andy Pan 2018-05-25 00:44:25 +08:00
commit bd4836ddec
8 changed files with 152 additions and 217 deletions

File 1 of 8: exported default-pool API (Push renamed to Submit)

@@ -39,9 +39,9 @@ const (
 // Init a instance pool when importing ants
 var defaultPool, _ = NewPool(DefaultPoolSize)
 
-// Push submit a task to pool
-func Push(task f) error {
-    return defaultPool.Push(task)
+// Submit submit a task to pool
+func Submit(task f) error {
+    return defaultPool.Submit(task)
 }
 
 // Running returns the number of the currently running goroutines

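This file renames the package-level entry point: Push becomes Submit, delegating to defaultPool.Submit, and (per pool.go below) the task type f now returns an error. A minimal sketch of calling the renamed API as it stands at this commit; the task body is illustrative, not from the repository:

package main

import (
    "fmt"
    "sync"

    "github.com/panjf2000/ants"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(1)
    // Submit replaces the old Push; tasks are now func() error.
    ants.Submit(func() error {
        defer wg.Done()
        fmt.Println("task ran on the default pool")
        return nil
    })
    wg.Wait()
}
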
File 2 of 8: benchmarks

@@ -23,7 +23,6 @@
 package ants_test
 
 import (
-    "runtime"
     "sync"
    "testing"
    "time"

@@ -43,58 +42,28 @@ const (
     YiB // 1208925819614629174706176
 )
 
 const RunTimes = 10000000
-const loop = 5
+const loop = 10
+
+func demoFunc() error {
+    time.Sleep(loop * time.Millisecond)
+    return nil
+}
 
 func demoPoolFunc(args interface{}) error {
-    // m := args.(int)
-    // var n int
-    // for i := 0; i < m; i++ {
-    //     n += i
-    // }
-    // return nil
+    //m := args.(int)
+    //var n int
+    //for i := 0; i < m; i++ {
+    //    n += i
+    //}
+    //return nil
     n := args.(int)
     time.Sleep(time.Duration(n) * time.Millisecond)
     return nil
 }
 
-func BenchmarkGoroutine(b *testing.B) {
-    for i := 0; i < b.N; i++ {
-        var wg sync.WaitGroup
-        for j := 0; j < RunTimes; j++ {
-            wg.Add(1)
-            go func() {
-                demoFunc()
-                wg.Done()
-            }()
-        }
-        wg.Wait()
-    }
-    mem := runtime.MemStats{}
-    runtime.ReadMemStats(&mem)
-    b.Logf("total memory usage:%d MB", mem.TotalAlloc/MiB)
-}
-
-func BenchmarkAntsPool(b *testing.B) {
-    for i := 0; i < b.N; i++ {
-        var wg sync.WaitGroup
-        for j := 0; j < RunTimes; j++ {
-            wg.Add(1)
-            ants.Push(func() {
-                demoFunc()
-                wg.Done()
-            })
-        }
-        wg.Wait()
-    }
-    mem := runtime.MemStats{}
-    runtime.ReadMemStats(&mem)
-    b.Logf("total memory usage:%d MB", mem.TotalAlloc/MiB)
-}
-
 func BenchmarkGoroutineWithFunc(b *testing.B) {
     for i := 0; i < b.N; i++ {
         var wg sync.WaitGroup
+        b.ResetTimer()
         for j := 0; j < RunTimes; j++ {
             wg.Add(1)
             go func() {

@@ -104,9 +73,6 @@ func BenchmarkGoroutineWithFunc(b *testing.B) {
         }
         wg.Wait()
     }
-    mem := runtime.MemStats{}
-    runtime.ReadMemStats(&mem)
-    b.Logf("total memory usage:%d MB", mem.TotalAlloc/MiB)
 }
 
 func BenchmarkAntsPoolWithFunc(b *testing.B) {

@@ -117,14 +83,29 @@ func BenchmarkAntsPoolWithFunc(b *testing.B) {
             wg.Done()
             return nil
         })
+        b.ResetTimer()
         for j := 0; j < RunTimes; j++ {
             wg.Add(1)
             p.Serve(loop)
         }
         wg.Wait()
+        b.Logf("running goroutines: %d", p.Running())
+    }
+}
+
+func BenchmarkGoroutine(b *testing.B) {
+    for i := 0; i < b.N; i++ {
+        for j := 0; j < RunTimes; j++ {
+            go demoFunc()
+        }
+    }
+}
+
+func BenchmarkAntsPool(b *testing.B) {
+    for i := 0; i < b.N; i++ {
+        for j := 0; j < RunTimes; j++ {
+            ants.Submit(demoFunc)
+        }
+        b.Logf("running goroutines: %d", ants.Running())
     }
-    mem := runtime.MemStats{}
-    runtime.ReadMemStats(&mem)
-    b.Logf("total memory usage:%d MB", mem.TotalAlloc/MiB)
 }

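The benchmark file moves demoFunc in (now returning error so it satisfies the new task type), drops the per-benchmark MemStats logging, and inserts b.ResetTimer() after pool construction so setup cost is excluded from the measurement. One observation: the diff places ResetTimer inside the outer b.N loop, which re-zeroes the timer on every iteration; the conventional placement is once, before the loop, as in this sketch (expensiveSetup is an illustrative name, not from the repository):

package bench_test

import (
    "testing"
    "time"
)

func expensiveSetup() { time.Sleep(100 * time.Millisecond) }

func BenchmarkWithSetup(b *testing.B) {
    expensiveSetup() // not part of the code under measurement
    b.ResetTimer()   // discard the time (and allocations) accrued so far
    for i := 0; i < b.N; i++ {
        _ = i * i // the work under measurement
    }
}
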
File 3 of 8: tests

@@ -24,7 +24,6 @@ package ants_test
 
 import (
     "runtime"
-    "time"
     "sync"
     "testing"

@@ -33,36 +32,14 @@ import (
 
 var n = 10000000
 
-//func demoFunc() {
-//    var n int
-//    for i := 0; i < 1000000; i++ {
-//        n += i
-//    }
-//}
-
-//func demoFunc() {
-//    var n int
-//    for i := 0; i < 10000; i++ {
-//        n += i
-//    }
-//    fmt.Printf("finish task with result:%d\n", n)
-//}
-
-func demoFunc() {
-    time.Sleep(10 * time.Millisecond)
-    // var n int
-    // for i := 0; i < 1000000; i++ {
-    //     n += i
-    // }
-}
-
 func TestDefaultPool(t *testing.T) {
     var wg sync.WaitGroup
     for i := 0; i < n; i++ {
         wg.Add(1)
-        ants.Push(func() {
+        ants.Submit(func() error {
             demoFunc()
             wg.Done()
+            return nil
         })
     }
     wg.Wait()

@@ -73,7 +50,7 @@ func TestDefaultPool(t *testing.T) {
     t.Logf("running workers number:%d", ants.Running())
     mem := runtime.MemStats{}
     runtime.ReadMemStats(&mem)
-    t.Logf("memory usage:%d", mem.TotalAlloc/MiB)
+    t.Logf("memory usage:%d MB", mem.TotalAlloc/MiB)
 }
 
 func TestNoPool(t *testing.T) {

@@ -89,30 +66,30 @@ func TestNoPool(t *testing.T) {
     wg.Wait()
     mem := runtime.MemStats{}
     runtime.ReadMemStats(&mem)
-    t.Logf("memory usage:%d", mem.TotalAlloc/MiB)
+    t.Logf("memory usage:%d MB", mem.TotalAlloc/MiB)
 }
 
-func TestAntsPoolWithFunc(t *testing.T) {
-    var wg sync.WaitGroup
-    p, _ := ants.NewPoolWithFunc(50000, func(i interface{}) error {
-        demoPoolFunc(i)
-        wg.Done()
-        return nil
-    })
-    for i := 0; i < n; i++ {
-        wg.Add(1)
-        p.Serve(n)
-    }
-    wg.Wait()
-    //t.Logf("pool capacity:%d", ants.Cap())
-    //t.Logf("free workers number:%d", ants.Free())
-    t.Logf("running workers number:%d", p.Running())
-    mem := runtime.MemStats{}
-    runtime.ReadMemStats(&mem)
-    t.Logf("memory usage:%d", mem.TotalAlloc/GiB)
-}
+// func TestAntsPoolWithFunc(t *testing.T) {
+//     var wg sync.WaitGroup
+//     p, _ := ants.NewPoolWithFunc(50000, func(i interface{}) error {
+//         demoPoolFunc(i)
+//         wg.Done()
+//         return nil
+//     })
+//     for i := 0; i < n; i++ {
+//         wg.Add(1)
+//         p.Serve(n)
+//     }
+//     wg.Wait()
+//     //t.Logf("pool capacity:%d", ants.Cap())
+//     //t.Logf("free workers number:%d", ants.Free())
+//     t.Logf("running workers number:%d", p.Running())
+//     mem := runtime.MemStats{}
+//     runtime.ReadMemStats(&mem)
+//     t.Logf("memory usage:%d", mem.TotalAlloc/GiB)
+// }
 
 // func TestNoPool(t *testing.T) {
 //     var wg sync.WaitGroup

@@ -135,7 +112,7 @@ func TestAntsPoolWithFunc(t *testing.T) {
 //     var wg sync.WaitGroup
 //     for i := 0; i < n; i++ {
 //         wg.Add(1)
-//         p.Push(func() {
+//         p.Submit(func() {
 //             demoFunc()
 //             //demoFunc()
 //             wg.Done()

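The surviving tests now label the memory figure with its unit (%d MB), and TestAntsPoolWithFunc is commented out wholesale. The measurement itself is unchanged; a self-contained sketch of the same reporting, assuming the test file's 1 << (10 * iota) size constants:

package main

import (
    "fmt"
    "runtime"
)

const MiB = 1 << 20

func main() {
    data := make([][]byte, 0, 256)
    for i := 0; i < 256; i++ {
        data = append(data, make([]byte, MiB)) // allocate 256 MiB cumulatively
    }
    _ = data
    var mem runtime.MemStats
    runtime.ReadMemStats(&mem)
    // TotalAlloc is cumulative bytes allocated on the heap, not live usage.
    fmt.Printf("memory usage:%d MB\n", mem.TotalAlloc/MiB)
}
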
File 4 of 8: example program

@@ -25,15 +25,17 @@ package main
 import (
     "fmt"
     "sync"
+    "sync/atomic"
 
     "github.com/panjf2000/ants"
 )
 
-var str = "Hello World!"
+var sum int32
 
 func myFunc(i interface{}) error {
-    s := i.(string)
-    fmt.Println(s)
+    n := i.(int)
+    atomic.AddInt32(&sum, int32(n))
+    fmt.Printf("run with %d\n", n)
     return nil
 }

@@ -43,7 +45,7 @@ func myFunc(i interface{}) error {
 //     // submit all your tasks to ants pool
 //     for i := 0; i < runTimes; i++ {
 //         wg.Add(1)
-//         ants.Push(func() {
+//         ants.Submit(func() {
 //             myFunc()
 //             wg.Done()
 //         })

@@ -58,7 +60,7 @@ func main() {
     // set 100 the size of goroutine pool
     var wg sync.WaitGroup
-    p, _ := ants.NewPoolWithFunc(100, func(i interface{}) error {
+    p, _ := ants.NewPoolWithFunc(10, func(i interface{}) error {
         myFunc(i)
         wg.Done()
         return nil

@@ -66,8 +68,14 @@ func main() {
     // submit
     for i := 0; i < runTimes; i++ {
         wg.Add(1)
-        p.Serve(str)
+        p.Serve(i)
     }
     wg.Wait()
-    fmt.Println("finish all tasks!")
+    //var m int
+    //var i int
+    //for n := range sum {
+    //    m += n
+    //}
+    fmt.Printf("running goroutines: %d\n", p.Running())
+    fmt.Printf("finish all tasks, result is %d\n", sum)
 }

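The example switches from printing a fixed string to summing the submitted integers. Because myFunc runs concurrently on up to 10 pool workers, the shared counter must be updated with sync/atomic; plain sum += n would be a data race. The pattern in isolation, with illustrative names:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var sum int32
    var wg sync.WaitGroup
    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go func(n int32) {
            defer wg.Done()
            atomic.AddInt32(&sum, n) // sum += n here would race
        }(int32(i))
    }
    wg.Wait()
    fmt.Println(atomic.LoadInt32(&sum)) // 499500
}
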
File 5 of 8: pool.go (66 lines changed)

@@ -26,14 +26,13 @@ import (
     "math"
     "sync"
     "sync/atomic"
-    "time"
 )
 
 type sig struct{}
 
-type f func()
+type f func() error
 
-// Pool accept the tasks from client,it will limit the total
+// Pool accept the tasks from client,it limits the total
 // of goroutines to a given number by recycling goroutines.
 type Pool struct {
     // capacity of the pool.

@@ -49,16 +48,13 @@ type Pool struct {
     // workers is a slice that store the available workers.
     workers []*Worker
 
-    // workerPool is a pool that saves a set of temporary objects.
-    workerPool sync.Pool
-
     // release is used to notice the pool to closed itself.
     release chan sig
 
-    // lock for synchronous operation
     lock sync.Mutex
 
-    // closed is used to confirm whether this pool has been closed.
-    closed int32
+    once sync.Once
 }
 
 // NewPool generates a instance of ants pool

@@ -69,8 +65,7 @@ func NewPool(size int) (*Pool, error) {
     p := &Pool{
         capacity: int32(size),
         freeSignal: make(chan sig, math.MaxInt32),
-        release: make(chan sig),
-        closed: 0,
+        release: make(chan sig, 1),
     }
 
     return p, nil

@@ -78,27 +73,9 @@ func NewPool(size int) (*Pool, error) {
 //-------------------------------------------------------------------------
 
-// scanAndClean is a goroutine who will periodically clean up
-// after it is noticed that this pool is closed.
-func (p *Pool) scanAndClean() {
-    ticker := time.NewTicker(DefaultCleanIntervalTime * time.Second)
-    go func() {
-        ticker.Stop()
-        for range ticker.C {
-            if atomic.LoadInt32(&p.closed) == 1 {
-                p.lock.Lock()
-                for _, w := range p.workers {
-                    w.stop()
-                }
-                p.lock.Unlock()
-            }
-        }
-    }()
-}
-
-// Push submit a task to pool
-func (p *Pool) Push(task f) error {
-    if atomic.LoadInt32(&p.closed) == 1 {
+// Submit submit a task to pool
+func (p *Pool) Submit(task f) error {
+    if len(p.release) > 0 {
         return ErrPoolClosed
     }
     w := p.getWorker()

@@ -123,10 +100,9 @@ func (p *Pool) Cap() int {
 // Release Closed this pool
 func (p *Pool) Release() error {
-    p.lock.Lock()
-    atomic.StoreInt32(&p.closed, 1)
-    close(p.release)
-    p.lock.Unlock()
+    p.once.Do(func() {
+        p.release <- sig{}
+    })
     return nil
 }

@@ -148,6 +124,8 @@ func (p *Pool) getWorker() *Worker {
     if n < 0 {
         if p.running >= p.capacity {
             waiting = true
+        } else {
+            p.running++
         }
     } else {
         w = workers[n]

@@ -173,17 +151,21 @@ func (p *Pool) getWorker() *Worker {
             break
         }
     } else if w == nil {
-        wp := p.workerPool.Get()
-        if wp == nil {
-            w = &Worker{
-                pool: p,
-                task: make(chan f, workerArgsCap),
-            }
-        } else {
-            w = wp.(*Worker)
-        }
+        //wp := p.workerPool.Get()
+        //if wp == nil {
+        //    w = &Worker{
+        //        pool: p,
+        //        task: make(chan f, workerArgsCap),
+        //    }
+        //} else {
+        //    w = wp.(*Worker)
+        //}
+        w = &Worker{
+            pool: p,
+            task: make(chan f),
+        }
         w.run()
-        p.workerPool.Put(w)
+        //p.workerPool.Put(w)
     }
     return w
 }

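pool.go replaces three pieces of machinery at once: the periodic scanAndClean goroutine is gone, the closed int32 flag becomes a check on the now-buffered release channel (len(p.release) > 0), and Release is made idempotent with sync.Once. The shape of that shutdown pattern, reduced to essentials; Gate and ErrClosed are illustrative names, and this sketch makes no claim about how later ants versions do it:

package main

import (
    "errors"
    "fmt"
    "sync"
)

var ErrClosed = errors.New("gate is closed")

type Gate struct {
    release chan struct{}
    once    sync.Once
}

func NewGate() *Gate {
    return &Gate{release: make(chan struct{}, 1)} // capacity 1: the signal never blocks
}

func (g *Gate) Close() {
    g.once.Do(func() { g.release <- struct{}{} }) // idempotent: sent exactly once
}

func (g *Gate) Submit() error {
    if len(g.release) > 0 { // a non-empty buffer doubles as a closed flag
        return ErrClosed
    }
    return nil
}

func main() {
    g := NewGate()
    fmt.Println(g.Submit()) // <nil>
    g.Close()
    g.Close() // safe to call twice
    fmt.Println(g.Submit()) // gate is closed
}
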
File 6 of 8: PoolWithFunc

@@ -26,12 +26,11 @@ import (
     "math"
     "sync"
     "sync/atomic"
-    "time"
 )
 
 type pf func(interface{}) error
 
-// PoolWithFunc accept the tasks from client,it will limit the total
+// PoolWithFunc accept the tasks from client,it limits the total
 // of goroutines to a given number by recycling goroutines.
 type PoolWithFunc struct {
     // capacity of the pool.

@@ -47,18 +46,16 @@ type PoolWithFunc struct {
     // workers is a slice that store the available workers.
     workers []*WorkerWithFunc
 
-    // workerPool is a pool that saves a set of temporary objects.
-    workerPool sync.Pool
-
     // release is used to notice the pool to closed itself.
     release chan sig
 
-    // lock for synchronous operation
     lock sync.Mutex
 
-    // closed is used to confirm whether this pool has been closed.
-    closed int32
+    // pf is the function for processing tasks
     poolFunc pf
+
+    once sync.Once
 }
 
 // NewPoolWithFunc generates a instance of ants pool with a specific function.

@@ -69,8 +66,7 @@ func NewPoolWithFunc(size int, f pf) (*PoolWithFunc, error) {
     p := &PoolWithFunc{
         capacity: int32(size),
         freeSignal: make(chan sig, math.MaxInt32),
-        release: make(chan sig),
-        closed: 0,
+        release: make(chan sig, 1),
         poolFunc: f,
     }

@@ -79,27 +75,12 @@ func NewPoolWithFunc(size int, f pf) (*PoolWithFunc, error) {
 //-------------------------------------------------------------------------
 
-// scanAndClean is a goroutine who will periodically clean up
-// after it is noticed that this pool is closed.
-func (p *PoolWithFunc) scanAndClean() {
-    ticker := time.NewTicker(DefaultCleanIntervalTime * time.Second)
-    go func() {
-        ticker.Stop()
-        for range ticker.C {
-            if atomic.LoadInt32(&p.closed) == 1 {
-                p.lock.Lock()
-                for _, w := range p.workers {
-                    w.stop()
-                }
-                p.lock.Unlock()
-            }
-        }
-    }()
-}
-
 // Serve submit a task to pool
 func (p *PoolWithFunc) Serve(args interface{}) error {
-    if atomic.LoadInt32(&p.closed) == 1 {
+    //if atomic.LoadInt32(&p.closed) == 1 {
+    //    return ErrPoolClosed
+    //}
+    if len(p.release) > 0 {
         return ErrPoolClosed
     }
     w := p.getWorker()

@@ -124,10 +105,9 @@ func (p *PoolWithFunc) Cap() int {
 // Release Closed this pool
 func (p *PoolWithFunc) Release() error {
-    p.lock.Lock()
-    atomic.StoreInt32(&p.closed, 1)
-    close(p.release)
-    p.lock.Unlock()
+    p.once.Do(func() {
+        p.release <- sig{}
+    })
     return nil
 }

@@ -149,6 +129,8 @@ func (p *PoolWithFunc) getWorker() *WorkerWithFunc {
     if n < 0 {
         if p.running >= p.capacity {
             waiting = true
+        } else {
+            p.running++
         }
     } else {
         w = workers[n]

@@ -174,23 +156,28 @@ func (p *PoolWithFunc) getWorker() *WorkerWithFunc {
             break
         }
     } else if w == nil {
-        wp := p.workerPool.Get()
-        if wp == nil {
-            w = &WorkerWithFunc{
-                pool: p,
-                args: make(chan interface{}, workerArgsCap),
-            }
-        } else {
-            w = wp.(*WorkerWithFunc)
-        }
+        //wp := p.workerPool.Get()
+        //if wp == nil {
+        //    w = &WorkerWithFunc{
+        //        pool: p,
+        //        args: make(chan interface{}, workerArgsCap),
+        //    }
+        //} else {
+        //    w = wp.(*WorkerWithFunc)
+        //}
+        w = &WorkerWithFunc{
+            pool: p,
+            args: make(chan interface{}),
+        }
         w.run()
-        p.workerPool.Put(w)
+        //p.workerPool.Put(w)
     }
     return w
 }
 
 // putWorker puts a worker back into free pool, recycling the goroutines.
 func (p *PoolWithFunc) putWorker(worker *WorkerWithFunc) {
+    //p.workerPool.Put(worker)
     p.lock.Lock()
     p.workers = append(p.workers, worker)
     p.lock.Unlock()

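PoolWithFunc mirrors the pool.go changes: the same sync.Once release, the same len(release) check in Serve, and worker reuse now goes only through the workers slice and putWorker rather than sync.Pool. A minimal usage sketch of the API as it stands at this commit, matching the example program above:

package main

import (
    "fmt"
    "sync"

    "github.com/panjf2000/ants"
)

func main() {
    var wg sync.WaitGroup
    // one processing function for the whole pool; Serve only supplies arguments
    p, _ := ants.NewPoolWithFunc(10, func(i interface{}) error {
        defer wg.Done()
        fmt.Printf("processing %v\n", i)
        return nil
    })
    defer p.Release()
    for i := 0; i < 100; i++ {
        wg.Add(1)
        p.Serve(i)
    }
    wg.Wait()
}
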
File 7 of 8: Worker

@@ -26,9 +26,9 @@ import (
     "sync/atomic"
 )
 
-// Worker is the actual executor who run the tasks,
-// it will start a goroutine that accept tasks and
-// perform function calls.
+// Worker is the actual executor who runs the tasks,
+// it starts a goroutine that accepts tasks and
+// performs function calls.
 type Worker struct {
     // pool who owns this worker.
     pool *Pool

@@ -37,13 +37,13 @@ type Worker struct {
     task chan f
 }
 
-// run will start a goroutine to repeat the process
-// that perform the function calls.
+// run starts a goroutine to repeat the process
+// that performs the function calls.
 func (w *Worker) run() {
-    atomic.AddInt32(&w.pool.running, 1)
+    //atomic.AddInt32(&w.pool.running, 1)
     go func() {
         for f := range w.task {
-            if f == nil {
+            if f == nil || len(w.pool.release) > 0 {
                 atomic.AddInt32(&w.pool.running, -1)
                 return
             }

@@ -58,7 +58,7 @@ func (w *Worker) stop() {
     w.task <- nil
 }
 
-// sendTask send a task to this worker.
+// sendTask sends a task to this worker.
 func (w *Worker) sendTask(task f) {
     w.task <- task
 }

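The worker loop now exits either on the nil sentinel sent by stop() or when a release signal is pending, decrementing the pool's running counter on the way out (the increment moved into getWorker). The sentinel half of that protocol on its own, with illustrative names:

package main

import (
    "fmt"
    "sync"
)

type task func() error

func worker(tasks chan task, wg *sync.WaitGroup) {
    defer wg.Done()
    for t := range tasks {
        if t == nil { // nil is the stop sentinel, as in Worker.stop()
            return
        }
        t()
    }
}

func main() {
    tasks := make(chan task)
    var wg sync.WaitGroup
    wg.Add(1)
    go worker(tasks, &wg)
    tasks <- func() error { fmt.Println("ran a task"); return nil }
    tasks <- nil // ask the worker to exit
    wg.Wait()
}
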
File 8 of 8: WorkerWithFunc

@@ -26,9 +26,9 @@ import (
     "sync/atomic"
 )
 
-// Worker is the actual executor who run the tasks,
-// it will start a goroutine that accept tasks and
-// perform function calls.
+// WorkerWithFunc is the actual executor who runs the tasks,
+// it starts a goroutine that accepts tasks and
+// performs function calls.
 type WorkerWithFunc struct {
     // pool who owns this worker.
     pool *PoolWithFunc

@@ -37,13 +37,13 @@ type WorkerWithFunc struct {
     args chan interface{}
 }
 
-// run will start a goroutine to repeat the process
-// that perform the function calls.
+// run starts a goroutine to repeat the process
+// that performs the function calls.
 func (w *WorkerWithFunc) run() {
-    atomic.AddInt32(&w.pool.running, 1)
+    //atomic.AddInt32(&w.pool.running, 1)
    go func() {
        for args := range w.args {
-            if args == nil {
+            if args == nil || len(w.pool.release) > 0 {
                 atomic.AddInt32(&w.pool.running, -1)
                 return
             }

@@ -58,7 +58,7 @@ func (w *WorkerWithFunc) stop() {
     w.args <- nil
 }
 
-// sendTask send a task to this worker.
+// sendTask sends a task to this worker.
 func (w *WorkerWithFunc) sendTask(args interface{}) {
     w.args <- args
 }