Add PoolStats.StaleConns and enable logging by default

Vladimir Mihailenco 2017-09-11 09:10:17 +03:00
parent 1173a9589f
commit 5294b5dae1
6 changed files with 26 additions and 29 deletions

View File

@@ -804,8 +804,10 @@ func (c *ClusterClient) PoolStats() *PoolStats {
 		acc.Requests += s.Requests
 		acc.Hits += s.Hits
 		acc.Timeouts += s.Timeouts
+
 		acc.TotalConns += s.TotalConns
 		acc.FreeConns += s.FreeConns
+		acc.StaleConns += s.StaleConns
 	}
 
 	for _, node := range state.slaves {
@@ -813,8 +815,10 @@ func (c *ClusterClient) PoolStats() *PoolStats {
 		acc.Requests += s.Requests
 		acc.Hits += s.Hits
 		acc.Timeouts += s.Timeouts
+
 		acc.TotalConns += s.TotalConns
 		acc.FreeConns += s.FreeConns
+		acc.StaleConns += s.StaleConns
 	}
 
 	return &acc
@@ -873,21 +877,12 @@ func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
 			break
 		}
 
-		var n int
 		for _, node := range nodes {
-			nn, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+			_, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
 			if err != nil {
 				internal.Logf("ReapStaleConns failed: %s", err)
-			} else {
-				n += nn
 			}
 		}
-
-		s := c.PoolStats()
-		internal.Logf(
-			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
-			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
-		)
 	}
 }
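
The aggregated cluster stats now carry the new counter as well. A minimal sketch of reading it from a ClusterClient; the addresses are placeholders, not part of this commit:

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Placeholder addresses; any reachable cluster works the same way.
	cluster := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"},
	})
	defer cluster.Close()

	// PoolStats sums per-node pool stats, now including StaleConns.
	s := cluster.PoolStats()
	fmt.Printf("total=%d free=%d stale=%d\n", s.TotalConns, s.FreeConns, s.StaleConns)
}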

View File

@@ -27,8 +27,9 @@ type Stats struct {
 	Hits     uint32 // number of times free connection was found in the pool
 	Timeouts uint32 // number of times a wait timeout occurred
 
-	TotalConns uint32 // the number of total connections in the pool
-	FreeConns  uint32 // the number of free connections in the pool
+	TotalConns uint32 // number of total connections in the pool
+	FreeConns  uint32 // number of free connections in the pool
+	StaleConns uint32 // number of stale connections removed from the pool
 }
 
 type Pooler interface {
@@ -265,11 +266,13 @@ func (p *ConnPool) FreeLen() int {
 }
 
 func (p *ConnPool) Stats() *Stats {
 	return &Stats{
-		Requests:   atomic.LoadUint32(&p.stats.Requests),
-		Hits:       atomic.LoadUint32(&p.stats.Hits),
-		Timeouts:   atomic.LoadUint32(&p.stats.Timeouts),
+		Requests: atomic.LoadUint32(&p.stats.Requests),
+		Hits:     atomic.LoadUint32(&p.stats.Hits),
+		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
 		TotalConns: uint32(p.Len()),
 		FreeConns:  uint32(p.FreeLen()),
+		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
 	}
 }
@@ -362,10 +365,6 @@ func (p *ConnPool) reaper(frequency time.Duration) {
 			internal.Logf("ReapStaleConns failed: %s", err)
 			continue
 		}
-		s := p.Stats()
-		internal.Logf(
-			"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
-			n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
-		)
+		atomic.AddUint32(&p.stats.StaleConns, uint32(n))
 	}
 }
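
With the reaper no longer logging each sweep, callers that want visibility can poll the exposed counter themselves. A sketch under assumed option values; the Addr, interval, and log format are illustrative, not part of this commit:

package main

import (
	"log"
	"time"

	"github.com/go-redis/redis"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Addr:               ":6379",
		IdleTimeout:        time.Minute,      // reap connections idle longer than this
		IdleCheckFrequency: 30 * time.Second, // how often the reaper runs
	})
	defer client.Close()

	// Poll the counter at whatever interval suits the application.
	for range time.Tick(10 * time.Second) {
		s := client.PoolStats()
		log.Printf("pool: total=%d free=%d stale=%d", s.TotalConns, s.FreeConns, s.StaleConns)
	}
}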

View File

@@ -3,7 +3,6 @@ package redis_test
 import (
 	"errors"
 	"fmt"
-	"log"
 	"net"
 	"os"
 	"os/exec"
@@ -51,10 +50,6 @@ var cluster = &clusterScenario{
 	clients: make(map[string]*redis.Client, 6),
 }
 
-func init() {
-	redis.SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile))
-}
-
 var _ = BeforeSuite(func() {
 	var err error

View File

@@ -205,6 +205,7 @@ type PoolStats struct {
 	Hits     uint32 // number of times free connection was found in the pool
 	Timeouts uint32 // number of times a wait timeout occurred
 
-	TotalConns uint32 // the number of total connections in the pool
-	FreeConns  uint32 // the number of free connections in the pool
+	TotalConns uint32 // number of total connections in the pool
+	FreeConns  uint32 // number of free connections in the pool
+	StaleConns uint32 // number of stale connections removed from the pool
 }

View File

@@ -3,6 +3,7 @@ package redis
 import (
 	"fmt"
 	"log"
+	"os"
 	"time"
 
 	"github.com/go-redis/redis/internal"
@@ -13,6 +14,10 @@ import (
 // Redis nil reply, e.g. when key does not exist.
 const Nil = internal.Nil
 
+func init() {
+	SetLogger(log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile))
+}
+
 func SetLogger(logger *log.Logger) {
 	internal.Logger = logger
 }
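
Because logging is now enabled by default, applications that relied on the previous silence have to opt out explicitly. A sketch using only the standard library; the prefix and flags are illustrative:

package main

import (
	"io/ioutil"
	"log"

	"github.com/go-redis/redis"
)

func main() {
	// Route go-redis output into an application logger, or silence it
	// entirely by writing to ioutil.Discard.
	redis.SetLogger(log.New(ioutil.Discard, "redis: ", log.LstdFlags))
}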

View File

@@ -301,8 +301,10 @@ func (d *sentinelFailover) listen(sentinel *sentinelClient) {
 		msg, err := pubsub.ReceiveMessage()
 		if err != nil {
-			internal.Logf("sentinel: ReceiveMessage failed: %s", err)
-			pubsub.Close()
+			if err != pool.ErrClosed {
+				internal.Logf("sentinel: ReceiveMessage failed: %s", err)
+				pubsub.Close()
+			}
 			d.resetSentinel()
 			return
 		}
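
For a failover client, a deliberate Close therefore no longer produces a spurious ReceiveMessage error in the log. A sketch; the master name and sentinel address are placeholders, not part of this commit:

package main

import "github.com/go-redis/redis"

func main() {
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",         // placeholder sentinel master name
		SentinelAddrs: []string{":26379"}, // placeholder sentinel address
	})

	// Closing the client tears down the sentinel pub/sub; the listener
	// sees pool.ErrClosed and exits without logging an error.
	_ = client.Close()
}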