Merge pull request #385 from go-redis/fix/enable-reaper-and-add-tests

Enable reaper on ClusterClient and add tests.
Vladimir Mihailenco 2016-10-02 16:00:31 +03:00 committed by GitHub
commit 5a272d03b9
11 changed files with 257 additions and 169 deletions
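
For context, a minimal usage sketch of what this change enables (addresses and durations here are placeholders; with IdleCheckFrequency left at zero the reaper stays off):

package main

import (
	"time"

	"gopkg.in/redis.v4"
)

func main() {
	// After this commit, a positive IdleCheckFrequency makes
	// NewClusterClient start a background reaper goroutine that
	// closes connections idle for longer than IdleTimeout.
	client := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:              []string{"127.0.0.1:7000", "127.0.0.1:7001"},
		IdleTimeout:        time.Minute,
		IdleCheckFrequency: 30 * time.Second,
	})
	defer client.Close()
}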

View File

@ -45,20 +45,25 @@ var _ Cmdable = (*ClusterClient)(nil)
// http://redis.io/topics/cluster-spec.
func NewClusterClient(opt *ClusterOptions) *ClusterClient {
opt.init()
client := &ClusterClient{
c := &ClusterClient{
opt: opt,
nodes: make(map[string]*clusterNode),
cmdsInfoOnce: new(sync.Once),
}
client.cmdable.process = client.Process
c.cmdable.process = c.Process
for _, addr := range opt.Addrs {
_, _ = client.nodeByAddr(addr)
_, _ = c.nodeByAddr(addr)
}
client.reloadSlots()
c.reloadSlots()
return client
if opt.IdleCheckFrequency > 0 {
go c.reaper(opt.IdleCheckFrequency)
}
return c
}
func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
@ -333,11 +338,9 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
slots := c.slots
c.mu.RUnlock()
var retErr error
var mu sync.Mutex
var wg sync.WaitGroup
visited := make(map[*clusterNode]struct{})
errCh := make(chan error, 1)
for _, nodes := range slots {
if len(nodes) == 0 {
continue
@ -351,20 +354,24 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
wg.Add(1)
go func(node *clusterNode) {
defer wg.Done()
err := fn(node.Client)
if err != nil {
mu.Lock()
if retErr == nil {
retErr = err
select {
case errCh <- err:
default:
}
mu.Unlock()
}
wg.Done()
}(master)
}
wg.Wait()
return retErr
select {
case err := <-errCh:
return err
default:
return nil
}
}
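
The mutex-guarded retErr is gone: a buffered channel of capacity one plus a non-blocking send keeps only the first error reported by the worker goroutines. A standalone sketch of the pattern (firstError and tasks are hypothetical names, not part of the library):

package main

import "sync"

// firstError runs tasks concurrently and returns the first error
// any of them reports, or nil if all succeed.
func firstError(tasks []func() error) error {
	var wg sync.WaitGroup
	errCh := make(chan error, 1) // room for exactly one error
	for _, task := range tasks {
		wg.Add(1)
		go func(task func() error) {
			defer wg.Done()
			if err := task(); err != nil {
				select {
				case errCh <- err: // first error wins
				default: // one is already buffered; drop the rest
				}
			}
		}(task)
	}
	wg.Wait()
	select {
	case err := <-errCh:
		return err
	default:
		return nil
	}
}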
// closeClients closes all clients and returns the first error if there are any.
@ -442,8 +449,8 @@ func (c *ClusterClient) setNodesLatency() {
}
// reaper closes idle connections to the cluster.
func (c *ClusterClient) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
ticker := time.NewTicker(idleCheckFrequency)
defer ticker.Stop()
for _ = range ticker.C {
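
The hunk is truncated mid-loop. A self-contained sketch of the ticker-driven reaper pattern it uses (Pooler and ReapStale are stand-ins, not the internal pool's exact API):

package main

import "time"

// Pooler is a stand-in for a connection pool; ReapStale is assumed
// to close connections idle longer than the pool's IdleTimeout.
type Pooler interface {
	ReapStale() int
}

// reap wakes up once per frequency and asks every pool to drop its
// stale connections, until stop is closed.
func reap(pools []Pooler, frequency time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			for _, p := range pools {
				p.ReapStale()
			}
		case <-stop:
			return
		}
	}
}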

View File

@ -2,7 +2,6 @@ package redis_test
import (
"fmt"
"math/rand"
"net"
"strconv"
"strings"
@ -50,17 +49,6 @@ func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.Cluste
for i, port := range s.ports {
addrs[i] = net.JoinHostPort("127.0.0.1", port)
}
if opt == nil {
opt = &redis.ClusterOptions{
DialTimeout: 10 * time.Second,
ReadTimeout: 30 * time.Second,
WriteTimeout: 30 * time.Second,
PoolSize: 10,
PoolTimeout: 30 * time.Second,
IdleTimeout: time.Second,
IdleCheckFrequency: time.Second,
}
}
opt.Addrs = addrs
return redis.NewClusterClient(opt)
}
@ -193,52 +181,12 @@ func stopCluster(scenario *clusterScenario) error {
//------------------------------------------------------------------------------
var _ = Describe("Cluster", func() {
Describe("HashSlot", func() {
It("should calculate hash slots", func() {
tests := []struct {
key string
slot int
}{
{"123456789", 12739},
{"{}foo", 9500},
{"foo{}", 5542},
{"foo{}{bar}", 8363},
{"", 10503},
{"", 5176},
{string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 5463},
}
// Empty keys receive random slot.
rand.Seed(100)
for _, test := range tests {
Expect(hashtag.Slot(test.key)).To(Equal(test.slot), "for %s", test.key)
}
})
It("should extract keys from tags", func() {
tests := []struct {
one, two string
}{
{"foo{bar}", "bar"},
{"{foo}bar", "foo"},
{"{user1000}.following", "{user1000}.followers"},
{"foo{{bar}}zap", "{bar"},
{"foo{bar}{zap}", "bar"},
}
for _, test := range tests {
Expect(hashtag.Slot(test.one)).To(Equal(hashtag.Slot(test.two)), "for %s <-> %s", test.one, test.two)
}
})
})
Describe("Commands", func() {
var _ = Describe("ClusterClient", func() {
var client *redis.ClusterClient
describeClusterClient := func() {
It("should CLUSTER SLOTS", func() {
res, err := cluster.primary().ClusterSlots().Result()
res, err := client.ClusterSlots().Result()
Expect(err).NotTo(HaveOccurred())
Expect(res).To(HaveLen(3))
@ -251,73 +199,48 @@ var _ = Describe("Cluster", func() {
})
It("should CLUSTER NODES", func() {
res, err := cluster.primary().ClusterNodes().Result()
res, err := client.ClusterNodes().Result()
Expect(err).NotTo(HaveOccurred())
Expect(len(res)).To(BeNumerically(">", 400))
})
It("should CLUSTER INFO", func() {
res, err := cluster.primary().ClusterInfo().Result()
res, err := client.ClusterInfo().Result()
Expect(err).NotTo(HaveOccurred())
Expect(res).To(ContainSubstring("cluster_known_nodes:6"))
})
It("should CLUSTER KEYSLOT", func() {
hashSlot, err := cluster.primary().ClusterKeySlot("somekey").Result()
hashSlot, err := client.ClusterKeySlot("somekey").Result()
Expect(err).NotTo(HaveOccurred())
Expect(hashSlot).To(Equal(int64(hashtag.Slot("somekey"))))
})
It("should CLUSTER COUNT-FAILURE-REPORTS", func() {
n, err := cluster.primary().ClusterCountFailureReports(cluster.nodeIds[0]).Result()
n, err := client.ClusterCountFailureReports(cluster.nodeIds[0]).Result()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(0)))
})
It("should CLUSTER COUNTKEYSINSLOT", func() {
n, err := cluster.primary().ClusterCountKeysInSlot(10).Result()
n, err := client.ClusterCountKeysInSlot(10).Result()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(int64(0)))
})
It("should CLUSTER DELSLOTS", func() {
res, err := cluster.primary().ClusterDelSlotsRange(16000, 16384-1).Result()
Expect(err).NotTo(HaveOccurred())
Expect(res).To(Equal("OK"))
cluster.primary().ClusterAddSlotsRange(16000, 16384-1)
})
It("should CLUSTER SAVECONFIG", func() {
res, err := cluster.primary().ClusterSaveConfig().Result()
res, err := client.ClusterSaveConfig().Result()
Expect(err).NotTo(HaveOccurred())
Expect(res).To(Equal("OK"))
})
It("should CLUSTER SLAVES", func() {
nodesList, err := cluster.primary().ClusterSlaves(cluster.nodeIds[0]).Result()
nodesList, err := client.ClusterSlaves(cluster.nodeIds[0]).Result()
Expect(err).NotTo(HaveOccurred())
Expect(nodesList).Should(ContainElement(ContainSubstring("slave")))
Expect(nodesList).Should(HaveLen(1))
})
// It("should CLUSTER READONLY", func() {
// res, err := cluster.primary().ReadOnly().Result()
// Expect(err).NotTo(HaveOccurred())
// Expect(res).To(Equal("OK"))
// })
// It("should CLUSTER READWRITE", func() {
// res, err := cluster.primary().ReadWrite().Result()
// Expect(err).NotTo(HaveOccurred())
// Expect(res).To(Equal("OK"))
// })
})
})
var _ = Describe("ClusterClient", func() {
var client *redis.ClusterClient
describeClusterClient := func() {
It("should GET/SET/DEL", func() {
val, err := client.Get("A").Result()
Expect(err).To(Equal(redis.Nil))
@ -340,6 +263,18 @@ var _ = Describe("ClusterClient", func() {
Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
})
It("removes idle connections", func() {
stats := client.PoolStats()
Expect(stats.TotalConns).NotTo(BeZero())
Expect(stats.FreeConns).NotTo(BeZero())
time.Sleep(2 * time.Second)
stats = client.PoolStats()
Expect(stats.TotalConns).To(BeZero())
Expect(stats.FreeConns).To(BeZero())
})
It("follows redirects", func() {
Expect(client.Set("A", "VALUE", 0).Err()).NotTo(HaveOccurred())
@ -352,9 +287,9 @@ var _ = Describe("ClusterClient", func() {
})
It("returns an error when there are no attempts left", func() {
client := cluster.clusterClient(&redis.ClusterOptions{
MaxRedirects: -1,
})
opt := redisClusterOptions()
opt.MaxRedirects = -1
client := cluster.clusterClient(opt)
slot := hashtag.Slot("A")
Expect(client.SwapSlotNodes(slot)).To(Equal([]string{"127.0.0.1:8224", "127.0.0.1:8221"}))
@ -483,7 +418,7 @@ var _ = Describe("ClusterClient", func() {
Describe("default ClusterClient", func() {
BeforeEach(func() {
client = cluster.clusterClient(nil)
client = cluster.clusterClient(redisClusterOptions())
_ = client.ForEachMaster(func(master *redis.Client) error {
return master.FlushDb().Err()
@ -499,9 +434,9 @@ var _ = Describe("ClusterClient", func() {
Describe("ClusterClient with RouteByLatency", func() {
BeforeEach(func() {
client = cluster.clusterClient(&redis.ClusterOptions{
RouteByLatency: true,
})
opt := redisClusterOptions()
opt.RouteByLatency = true
client = cluster.clusterClient(opt)
_ = client.ForEachMaster(func(master *redis.Client) error {
return master.FlushDb().Err()
@ -543,11 +478,13 @@ func BenchmarkRedisClusterPing(b *testing.B) {
processes: make(map[string]*redisProcess, 6),
clients: make(map[string]*redis.Client, 6),
}
if err := startCluster(cluster); err != nil {
b.Fatal(err)
}
defer stopCluster(cluster)
client := cluster.clusterClient(nil)
client := cluster.clusterClient(redisClusterOptions())
defer client.Close()
b.ResetTimer()

View File

@ -1297,7 +1297,7 @@ var _ = Describe("Commands", func() {
stats := client.PoolStats()
Expect(stats.Requests).To(Equal(uint32(3)))
Expect(stats.Hits).To(Equal(uint32(2)))
Expect(stats.Hits).To(Equal(uint32(1)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})

View File

@ -1,10 +1,18 @@
package hashtag
import (
"math/rand"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestGinkgoSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "hashtag")
}
var _ = Describe("CRC16", func() {
// http://redis.io/topics/cluster-spec#keys-distribution-model
@ -23,3 +31,44 @@ var _ = Describe("CRC16", func() {
})
})
var _ = Describe("HashSlot", func() {
It("should calculate hash slots", func() {
tests := []struct {
key string
slot int
}{
{"123456789", 12739},
{"{}foo", 9500},
{"foo{}", 5542},
{"foo{}{bar}", 8363},
{"", 10503},
{"", 5176},
{string([]byte{83, 153, 134, 118, 229, 214, 244, 75, 140, 37, 215, 215}), 5463},
}
// Empty keys receive random slot.
rand.Seed(100)
for _, test := range tests {
Expect(Slot(test.key)).To(Equal(test.slot), "for %s", test.key)
}
})
It("should extract keys from tags", func() {
tests := []struct {
one, two string
}{
{"foo{bar}", "bar"},
{"{foo}bar", "foo"},
{"{user1000}.following", "{user1000}.followers"},
{"foo{{bar}}zap", "{bar"},
{"foo{bar}{zap}", "bar"},
}
for _, test := range tests {
Expect(Slot(test.one)).To(Equal(Slot(test.two)), "for %s <-> %s", test.one, test.two)
}
})
})
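
For reference, the rule these tests pin down comes from the cluster spec (http://redis.io/topics/cluster-spec): hash only the body of the first non-empty {...} group if one exists, CRC16 the result modulo 16384, and give empty keys a random slot. A minimal sketch, assuming crc16 is the spec's CRC16-CCITT implementation defined elsewhere in the package (tag and slot are illustrative names, not the exported API):

package hashtag

import (
	"math/rand"
	"strings"
)

// tag returns the hashable part of a key: the body of the first
// non-empty {...} group, or the whole key if there is none.
func tag(key string) string {
	if s := strings.IndexByte(key, '{'); s != -1 {
		if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
			return key[s+1 : s+1+e]
		}
	}
	return key
}

// slot maps a key to one of the 16384 cluster slots.
func slot(key string) int {
	key = tag(key)
	if key == "" {
		return rand.Intn(16384) // empty keys receive a random slot
	}
	return int(crc16([]byte(key))) % 16384
}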

View File

@ -25,8 +25,8 @@ var timers = sync.Pool{
},
}
// PoolStats contains pool state information and accumulated stats.
type PoolStats struct {
// Stats contains pool state information and accumulated stats.
type Stats struct {
Requests uint32 // number of times a connection was requested by the pool
Hits uint32 // number of times free connection was found in the pool
Timeouts uint32 // number of times a wait timeout occurred
@ -41,7 +41,7 @@ type Pooler interface {
Remove(*Conn, error) error
Len() int
FreeLen() int
Stats() *PoolStats
Stats() *Stats
Close() error
Closed() bool
}
@ -64,7 +64,7 @@ type ConnPool struct {
freeConnsMu sync.Mutex
freeConns []*Conn
stats PoolStats
stats Stats
_closed int32 // atomic
lastErr atomic.Value
@ -173,16 +173,22 @@ func (p *ConnPool) Get() (*Conn, bool, error) {
return nil, false, ErrPoolTimeout
}
p.freeConnsMu.Lock()
cn := p.popFree()
p.freeConnsMu.Unlock()
for {
p.freeConnsMu.Lock()
cn := p.popFree()
p.freeConnsMu.Unlock()
if cn != nil {
atomic.AddUint32(&p.stats.Hits, 1)
if !cn.IsStale(p.idleTimeout) {
return cn, false, nil
if cn == nil {
break
}
_ = p.closeConn(cn, errConnStale)
if cn.IsStale(p.idleTimeout) {
p.remove(cn, errConnStale)
continue
}
atomic.AddUint32(&p.stats.Hits, 1)
return cn, false, nil
}
newcn, err := p.NewConn()
@ -192,9 +198,6 @@ func (p *ConnPool) Get() (*Conn, bool, error) {
}
p.connsMu.Lock()
if cn != nil {
p.removeConn(cn)
}
p.conns = append(p.conns, newcn)
p.connsMu.Unlock()
@ -224,17 +227,13 @@ func (p *ConnPool) remove(cn *Conn, reason error) {
_ = p.closeConn(cn, reason)
p.connsMu.Lock()
p.removeConn(cn)
p.connsMu.Unlock()
}
func (p *ConnPool) removeConn(cn *Conn) {
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
break
}
}
p.connsMu.Unlock()
}
// Len returns total number of connections.
@ -253,14 +252,14 @@ func (p *ConnPool) FreeLen() int {
return l
}
func (p *ConnPool) Stats() *PoolStats {
stats := PoolStats{}
stats.Requests = atomic.LoadUint32(&p.stats.Requests)
stats.Hits = atomic.LoadUint32(&p.stats.Hits)
stats.Timeouts = atomic.LoadUint32(&p.stats.Timeouts)
stats.TotalConns = uint32(p.Len())
stats.FreeConns = uint32(p.FreeLen())
return &stats
func (p *ConnPool) Stats() *Stats {
return &Stats{
Requests: atomic.LoadUint32(&p.stats.Requests),
Hits: atomic.LoadUint32(&p.stats.Hits),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
FreeConns: uint32(p.FreeLen()),
}
}
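
Taken together, the reworked Get path loops over the free list, discarding stale connections through remove (which now both closes a connection and deletes it from conns) until a fresh one turns up. A consolidated sketch of that loop (popFresh is an illustrative name, not a real method):

// popFresh drains the free list until it finds a connection that is
// not stale; stale connections are closed and removed from the pool.
func (p *ConnPool) popFresh() *Conn {
	for {
		p.freeConnsMu.Lock()
		cn := p.popFree()
		p.freeConnsMu.Unlock()
		if cn == nil {
			return nil // free list exhausted; caller dials a new conn
		}
		if cn.IsStale(p.idleTimeout) {
			p.remove(cn, errConnStale)
			continue
		}
		atomic.AddUint32(&p.stats.Hits, 1)
		return cn
	}
}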
func (p *ConnPool) Closed() bool {

View File

@ -42,7 +42,7 @@ func (p *SingleConnPool) FreeLen() int {
return 0
}
func (p *SingleConnPool) Stats() *PoolStats {
func (p *SingleConnPool) Stats() *Stats {
return nil
}

View File

@ -106,7 +106,9 @@ func (p *StickyConnPool) FreeLen() int {
return 0
}
func (p *StickyConnPool) Stats() *PoolStats { return nil }
func (p *StickyConnPool) Stats() *Stats {
return nil
}
func (p *StickyConnPool) Close() error {
defer p.mx.Unlock()

View File

@ -108,8 +108,36 @@ func redisOptions() *redis.Options {
WriteTimeout: 30 * time.Second,
PoolSize: 10,
PoolTimeout: 30 * time.Second,
IdleTimeout: time.Second,
IdleCheckFrequency: time.Second,
IdleTimeout: 500 * time.Millisecond,
IdleCheckFrequency: 500 * time.Millisecond,
}
}
func redisClusterOptions() *redis.ClusterOptions {
return &redis.ClusterOptions{
DialTimeout: 10 * time.Second,
ReadTimeout: 30 * time.Second,
WriteTimeout: 30 * time.Second,
PoolSize: 10,
PoolTimeout: 30 * time.Second,
IdleTimeout: 500 * time.Millisecond,
IdleCheckFrequency: 500 * time.Millisecond,
}
}
func redisRingOptions() *redis.RingOptions {
return &redis.RingOptions{
Addrs: map[string]string{
"ringShardOne": ":" + ringShard1Port,
"ringShardTwo": ":" + ringShard2Port,
},
DialTimeout: 10 * time.Second,
ReadTimeout: 30 * time.Second,
WriteTimeout: 30 * time.Second,
PoolSize: 10,
PoolTimeout: 30 * time.Second,
IdleTimeout: 500 * time.Millisecond,
IdleCheckFrequency: 500 * time.Millisecond,
}
}
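
These helpers return fresh option structs so each test can tweak a field or two before dialing, as the cluster tests above do:

opt := redisClusterOptions()
opt.RouteByLatency = true
client := cluster.clusterClient(opt)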

View File

@ -1,6 +1,8 @@
package redis_test
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -20,7 +22,7 @@ var _ = Describe("pool", func() {
Expect(client.Close()).NotTo(HaveOccurred())
})
It("should respect max size", func() {
It("respects max size", func() {
perform(1000, func(id int) {
val, err := client.Ping().Result()
Expect(err).NotTo(HaveOccurred())
@ -33,7 +35,7 @@ var _ = Describe("pool", func() {
Expect(pool.Len()).To(Equal(pool.FreeLen()))
})
It("should respect max on multi", func() {
It("srespect max size on multi", func() {
perform(1000, func(id int) {
var ping *redis.StatusCmd
@ -58,7 +60,7 @@ var _ = Describe("pool", func() {
Expect(pool.Len()).To(Equal(pool.FreeLen()))
})
It("should respect max on pipelines", func() {
It("respects max size on pipelines", func() {
perform(1000, func(id int) {
pipe := client.Pipeline()
ping := pipe.Ping()
@ -76,7 +78,7 @@ var _ = Describe("pool", func() {
Expect(pool.Len()).To(Equal(pool.FreeLen()))
})
It("should respect max on pubsub", func() {
It("respects max size on pubsub", func() {
connPool := client.Pool()
connPool.(*pool.ConnPool).DialLimiter = nil
@ -90,7 +92,7 @@ var _ = Describe("pool", func() {
Expect(connPool.Len()).To(BeNumerically("<=", 10))
})
It("should remove broken connections", func() {
It("removes broken connections", func() {
cn, _, err := client.Pool().Get()
Expect(err).NotTo(HaveOccurred())
cn.NetConn = &badConn{}
@ -113,7 +115,7 @@ var _ = Describe("pool", func() {
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
It("should reuse connections", func() {
It("reuses connections", func() {
for i := 0; i < 100; i++ {
val, err := client.Ping().Result()
Expect(err).NotTo(HaveOccurred())
@ -129,4 +131,26 @@ var _ = Describe("pool", func() {
Expect(stats.Hits).To(Equal(uint32(100)))
Expect(stats.Timeouts).To(Equal(uint32(0)))
})
It("removes idle connections", func() {
stats := client.PoolStats()
Expect(stats).To(Equal(&redis.PoolStats{
Requests: 1,
Hits: 0,
Timeouts: 0,
TotalConns: 1,
FreeConns: 1,
}))
time.Sleep(2 * time.Second)
stats = client.PoolStats()
Expect(stats).To(Equal(&redis.PoolStats{
Requests: 1,
Hits: 0,
Timeouts: 0,
TotalConns: 0,
FreeConns: 0,
}))
})
})

ring.go (55 lines changed)
View File

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"gopkg.in/redis.v4/internal"
@ -65,7 +66,7 @@ func (opt *RingOptions) clientOptions() *Options {
type ringShard struct {
Client *Client
down int
down int32
}
func (shard *ringShard) String() string {
@ -80,7 +81,7 @@ func (shard *ringShard) String() string {
func (shard *ringShard) IsDown() bool {
const threshold = 3
return shard.down >= threshold
return atomic.LoadInt32(&shard.down) >= threshold
}
func (shard *ringShard) IsUp() bool {
@ -91,7 +92,7 @@ func (shard *ringShard) IsUp() bool {
func (shard *ringShard) Vote(up bool) bool {
if up {
changed := shard.IsDown()
shard.down = 0
atomic.StoreInt32(&shard.down, 0)
return changed
}
@ -99,7 +100,7 @@ func (shard *ringShard) Vote(up bool) bool {
return false
}
shard.down++
atomic.AddInt32(&shard.down, 1)
return shard.IsDown()
}
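
down is now an int32 touched only through sync/atomic, because the heartbeat goroutine writes it while request paths read it via IsDown. The same counter pattern in isolation (the health type is hypothetical):

package main

import "sync/atomic"

type health struct {
	down int32 // written by the heartbeat, read by request paths
}

func (h *health) markFailure() { atomic.AddInt32(&h.down, 1) }
func (h *health) markSuccess() { atomic.StoreInt32(&h.down, 0) }

func (h *health) isDown() bool {
	const threshold = 3
	return atomic.LoadInt32(&h.down) >= threshold
}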
@ -157,6 +158,52 @@ func NewRing(opt *RingOptions) *Ring {
return ring
}
// PoolStats returns accumulated connection pool stats.
func (c *Ring) PoolStats() *PoolStats {
var acc PoolStats
for _, shard := range c.shards {
s := shard.Client.connPool.Stats()
acc.Requests += s.Requests
acc.Hits += s.Hits
acc.Timeouts += s.Timeouts
acc.TotalConns += s.TotalConns
acc.FreeConns += s.FreeConns
}
return &acc
}
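
A caller might poll the aggregated stats to watch the ring as a whole; a hypothetical monitor (logRingStats and the interval are assumptions):

// logRingStats prints the summed pool stats once a minute.
// Assumes: import ("log"; "time"; "gopkg.in/redis.v4").
func logRingStats(ring *redis.Ring) {
	for range time.Tick(time.Minute) {
		s := ring.PoolStats()
		log.Printf("ring pool: total=%d free=%d hits=%d timeouts=%d",
			s.TotalConns, s.FreeConns, s.Hits, s.Timeouts)
	}
}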
// ForEachShard concurrently calls the fn on each live shard in the ring.
// It returns the first error if any.
func (c *Ring) ForEachShard(fn func(client *Client) error) error {
var wg sync.WaitGroup
errCh := make(chan error, 1)
for _, shard := range c.shards {
if shard.IsDown() {
continue
}
wg.Add(1)
go func(shard *ringShard) {
defer wg.Done()
err := fn(shard.Client)
if err != nil {
select {
case errCh <- err:
default:
}
}
}(shard)
}
wg.Wait()
select {
case err := <-errCh:
return err
default:
return nil
}
}
func (c *Ring) cmdInfo(name string) *CommandInfo {
c.cmdsInfoOnce.Do(func() {
for _, shard := range c.shards {

View File

@ -11,8 +11,9 @@ import (
"gopkg.in/redis.v4"
)
var _ = Describe("Redis ring", func() {
var _ = Describe("Redis Ring", func() {
const heartbeat = 100 * time.Millisecond
var ring *redis.Ring
setRingKeys := func() {
@ -23,20 +24,14 @@ var _ = Describe("Redis ring", func() {
}
BeforeEach(func() {
ring = redis.NewRing(&redis.RingOptions{
Addrs: map[string]string{
"ringShardOne": ":" + ringShard1Port,
"ringShardTwo": ":" + ringShard2Port,
},
HeartbeatFrequency: heartbeat,
opt := redisRingOptions()
opt.HeartbeatFrequency = heartbeat
ring = redis.NewRing(opt)
err := ring.ForEachShard(func(cl *redis.Client) error {
return cl.FlushDb().Err()
})
// Shards should not have any keys.
Expect(ringShard1.FlushDb().Err()).NotTo(HaveOccurred())
Expect(ringShard1.Info().Val()).NotTo(ContainSubstring("keys="))
Expect(ringShard2.FlushDb().Err()).NotTo(HaveOccurred())
Expect(ringShard2.Info().Val()).NotTo(ContainSubstring("keys="))
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {