Implement Close and fix reaper goroutine leak.

This commit is contained in:
Vladimir Mihailenco 2015-05-01 09:33:47 +03:00
parent 593f01fceb
commit d00fb6ead9
1 changed file with 33 additions and 12 deletions

View File

@ -13,10 +13,11 @@ type ClusterClient struct {
addrs []string addrs []string
slots [][]string slots [][]string
slotsMx sync.RWMutex // protects slots & addrs cache slotsMx sync.RWMutex // Protects slots and addrs.
clients map[string]*Client clients map[string]*Client
clientsMx sync.RWMutex closed bool
clientsMx sync.RWMutex // Protects clients and closed.
opt *ClusterOptions opt *ClusterOptions
@ -39,15 +40,23 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
return client return client
} }
// Close closes the cluster client. // Close closes the cluster client, releasing any open resources.
//
// It is rare to Close a Client, as the Client is meant to be
// long-lived and shared between many goroutines.
func (c *ClusterClient) Close() error { func (c *ClusterClient) Close() error {
// TODO: close should make client unusable c.clientsMx.Lock()
c.setSlots(nil)
c.resetClients() if c.closed {
return nil return nil
} }
c.closed = true
c.resetClients()
c.setSlots(nil)
// ------------------------------------------------------------------------ c.clientsMx.Unlock()
return nil
}
// getClient returns a Client for a given address. // getClient returns a Client for a given address.
func (c *ClusterClient) getClient(addr string) (*Client, error) { func (c *ClusterClient) getClient(addr string) (*Client, error) {
@ -64,6 +73,10 @@ func (c *ClusterClient) getClient(addr string) (*Client, error) {
c.clientsMx.RUnlock() c.clientsMx.RUnlock()
c.clientsMx.Lock() c.clientsMx.Lock()
if c.closed {
return nil, errClosed
}
client, ok = c.clients[addr] client, ok = c.clients[addr]
if !ok { if !ok {
opt := c.opt.clientOptions() opt := c.opt.clientOptions()
@ -83,7 +96,7 @@ func (c *ClusterClient) slotAddrs(slot int) []string {
return addrs return addrs
} }
// randomClient returns a Client for the first live node. // randomClient returns a Client for the first pingable node.
func (c *ClusterClient) randomClient() (client *Client, err error) { func (c *ClusterClient) randomClient() (client *Client, err error) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
n := rand.Intn(len(c.addrs)) n := rand.Intn(len(c.addrs))
@ -165,14 +178,12 @@ func (c *ClusterClient) process(cmd Cmder) {
// Closes all clients and returns last error if there are any. // Closes all clients and returns last error if there are any.
func (c *ClusterClient) resetClients() (err error) { func (c *ClusterClient) resetClients() (err error) {
c.clientsMx.Lock()
for addr, client := range c.clients { for addr, client := range c.clients {
if e := client.Close(); e != nil { if e := client.Close(); e != nil {
err = e err = e
} }
delete(c.clients, addr) delete(c.clients, addr)
} }
c.clientsMx.Unlock()
return err return err
} }
@ -231,14 +242,24 @@ func (c *ClusterClient) scheduleReload() {
// reaper closes idle connections to the cluster. // reaper closes idle connections to the cluster.
func (c *ClusterClient) reaper(ticker *time.Ticker) { func (c *ClusterClient) reaper(ticker *time.Ticker) {
for _ = range ticker.C { for _ = range ticker.C {
c.clientsMx.RLock()
if c.closed {
c.clientsMx.RUnlock()
return
}
for _, client := range c.clients { for _, client := range c.clients {
pool := client.connPool pool := client.connPool
// pool.First removes idle connections from the pool for us. So // pool.First removes idle connections from the pool and
// just put returned connection back. // returns first non-idle connection. So just put returned
// connection back.
if cn := pool.First(); cn != nil { if cn := pool.First(); cn != nil {
pool.Put(cn) pool.Put(cn)
} }
} }
c.clientsMx.RUnlock()
} }
} }