mirror of https://github.com/go-redis/redis.git

Merge pull request #95 from go-redis/fix/close-client-and-reaper

    Implement Close and fix reaper goroutine leak.

commit f7a1636422

cluster.go | 51

cluster.go:
@@ -13,10 +13,11 @@ type ClusterClient struct {
 	addrs []string
 	slots [][]string
-	slotsMx sync.RWMutex // protects slots & addrs cache
+	slotsMx sync.RWMutex // Protects slots and addrs.
 
 	clients map[string]*Client
-	clientsMx sync.RWMutex
+	closed    bool
+	clientsMx sync.RWMutex // Protects clients and closed.
 
 	opt *ClusterOptions
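The struct comments now state which mutex guards which fields: slotsMx covers the slots/addrs cache, while clientsMx covers both the clients map and the new closed flag. Keeping the flag under the same lock as the map means a single acquisition is enough to check for shutdown and touch the map atomically. A minimal sketch of the pattern, with hypothetical names (not from this repo):

    package clusterexample

    import "sync"

    // conns groups a map with the flag that guards its lifecycle; the
    // comment on mu documents exactly what it protects, mirroring the
    // convention adopted in cluster.go above.
    type conns struct {
        mu     sync.RWMutex // Protects m and closed.
        m      map[string]int
        closed bool
    }

    func (c *conns) get(k string) (int, bool) {
        c.mu.RLock()
        defer c.mu.RUnlock()
        if c.closed { // the same acquisition covers the flag and the map
            return 0, false
        }
        v, ok := c.m[k]
        return v, ok
    }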
@@ -35,20 +36,27 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 	}
 	client.commandable.process = client.process
 	client.reloadIfDue()
-	go client.reaper(time.NewTicker(5 * time.Minute))
+	go client.reaper()
 	return client
 }
 
-// Close closes the cluster client.
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
 func (c *ClusterClient) Close() error {
-	// TODO: close should make client unusable
-	c.setSlots(nil)
+	defer c.clientsMx.Unlock()
+	c.clientsMx.Lock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
 	c.resetClients()
+	c.setSlots(nil)
 	return nil
 }
 
-// ------------------------------------------------------------------------
-
 // getClient returns a Client for a given address.
 func (c *ClusterClient) getClient(addr string) (*Client, error) {
 	if addr == "" {
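The new Close is idempotent: the closed flag is checked and flipped under the write lock, so concurrent or repeated calls tear the client down at most once. Note that defer c.clientsMx.Unlock() appears before c.clientsMx.Lock(); this is legal because deferred calls run at function return, after the Lock has happened, though the conventional Lock-then-defer order reads more naturally. A standalone sketch of the same pattern (hypothetical type, not from this repo):

    package clusterexample

    import "sync"

    type closer struct {
        mu     sync.Mutex
        closed bool
    }

    // Close checks and flips the flag under the lock, so concurrent and
    // repeated calls are safe and the teardown runs at most once.
    func (c *closer) Close() error {
        c.mu.Lock()
        defer c.mu.Unlock()
        if c.closed {
            return nil
        }
        c.closed = true
        // teardown of cached resources would go here
        return nil
    }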
@@ -64,6 +72,11 @@ func (c *ClusterClient) getClient(addr string) (*Client, error) {
 	c.clientsMx.RUnlock()
 
 	c.clientsMx.Lock()
+	if c.closed {
+		c.clientsMx.Unlock()
+		return nil, errClosed
+	}
+
 	client, ok = c.clients[addr]
 	if !ok {
 		opt := c.opt.clientOptions()
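errClosed is not declared anywhere in this diff. Given the string asserted by the new test below, it is presumably defined elsewhere in the package along these lines (an assumption, not shown in the commit):

    package redis

    import "errors"

    // Assumed declaration: the message matches the string the new
    // cluster test expects from commands issued after Close.
    var errClosed = errors.New("redis: client is closed")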
@@ -83,7 +96,7 @@ func (c *ClusterClient) slotAddrs(slot int) []string {
 	return addrs
 }
 
-// randomClient returns a Client for the first live node.
+// randomClient returns a Client for the first pingable node.
 func (c *ClusterClient) randomClient() (client *Client, err error) {
 	for i := 0; i < 10; i++ {
 		n := rand.Intn(len(c.addrs))
@@ -165,14 +178,12 @@ func (c *ClusterClient) process(cmd Cmder) {
 
 // Closes all clients and returns last error if there are any.
 func (c *ClusterClient) resetClients() (err error) {
-	c.clientsMx.Lock()
 	for addr, client := range c.clients {
 		if e := client.Close(); e != nil {
 			err = e
 		}
 		delete(c.clients, addr)
 	}
-	c.clientsMx.Unlock()
 	return err
 }
 
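resetClients no longer takes clientsMx itself; locking is now the caller's job, and Close already holds the write lock when it calls resetClients. This matters because Go mutexes are not reentrant: if resetClients still called Lock, the new Close path above would deadlock against itself. A minimal illustration with hypothetical names:

    package clusterexample

    import "sync"

    type registry struct {
        mu sync.Mutex
        m  map[string]struct{}
    }

    // reset assumes the caller holds r.mu; calling r.mu.Lock here as
    // well would self-deadlock, because sync.Mutex is not reentrant.
    func (r *registry) reset() {
        for k := range r.m {
            delete(r.m, k)
        }
    }

    func (r *registry) close() {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.reset() // safe: reset itself never touches r.mu
    }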
@@ -229,16 +240,28 @@ func (c *ClusterClient) scheduleReload() {
 }
 
 // reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(ticker *time.Ticker) {
+func (c *ClusterClient) reaper() {
+	ticker := time.NewTicker(time.Minute)
+	defer ticker.Stop()
 	for _ = range ticker.C {
+		c.clientsMx.RLock()
+
+		if c.closed {
+			c.clientsMx.RUnlock()
+			break
+		}
+
 		for _, client := range c.clients {
 			pool := client.connPool
-			// pool.First removes idle connections from the pool for us. So
-			// just put returned connection back.
+			// pool.First removes idle connections from the pool and
+			// returns first non-idle connection. So just put returned
+			// connection back.
 			if cn := pool.First(); cn != nil {
 				pool.Put(cn)
 			}
 		}
+
+		c.clientsMx.RUnlock()
 	}
 }
 
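This hunk is the goroutine-leak fix named in the commit message. The old reaper received its ticker from NewClusterClient and never stopped it, and ranging over a live ticker channel never terminates, so both the ticker and the goroutine outlived any use of the client. The new version owns the ticker, stops it with defer, exits the loop once it observes c.closed under the read lock, and also tightens the interval from five minutes to one. A standalone before/after sketch (hypothetical names):

    package clusterexample

    import (
        "sync"
        "time"
    )

    type worker struct {
        mu     sync.RWMutex
        closed bool
    }

    // leakyLoop mirrors the old code: the caller's ticker is never
    // stopped and a range over a live ticker channel never ends, so
    // this goroutine and its ticker can never be collected.
    func (w *worker) leakyLoop(ticker *time.Ticker) {
        for range ticker.C {
            // periodic work...
        }
    }

    // loop mirrors the fix: the goroutine owns its ticker, stops it on
    // exit, and checks the closed flag each tick so Close can end it.
    func (w *worker) loop() {
        ticker := time.NewTicker(time.Minute)
        defer ticker.Stop()
        for range ticker.C {
            w.mu.RLock()
            done := w.closed
            w.mu.RUnlock()
            if done {
                break
            }
            // periodic work...
        }
    }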
cluster_test.go:

@@ -83,6 +83,7 @@ var _ = Describe("ClusterClient", func() {
 		Expect(subject.slots[8191]).To(BeEmpty())
 		Expect(subject.slots[8192]).To(BeEmpty())
 		Expect(subject.slots[16383]).To(BeEmpty())
+		Expect(subject.Ping().Err().Error()).To(Equal("redis: client is closed"))
 	})
 
 	It("should check if reload is due", func() {
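The added assertion exercises the change end to end: after Close, getClient returns errClosed, which surfaces as the command's error. A hedged usage sketch outside the test suite (the import path and address are placeholders; adjust to the version you build against):

    package main

    import (
        "fmt"

        "github.com/go-redis/redis" // placeholder import path
    )

    func main() {
        c := redis.NewClusterClient(&redis.ClusterOptions{
            Addrs: []string{"127.0.0.1:7000"}, // placeholder node address
        })
        _ = c.Close()

        // Commands issued after Close fail fast instead of dialing nodes.
        if err := c.Ping().Err(); err != nil {
            fmt.Println(err) // redis: client is closed
        }
    }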