clusterStateHolder.Get should load a state when there is none

Vladimir Mihailenco 2018-12-13 12:26:02 +02:00
parent f18a97fc94
commit 382feca784
2 changed files with 46 additions and 41 deletions
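
In short: clusterStateHolder.Get now falls back to Reload when no state has been cached yet, instead of returning the first load error remembered in firstErr, so a failed initial load is no longer sticky. A minimal, self-contained sketch of that pattern (the stateHolder type, its string-valued state, and the fake load function are illustrative stand-ins, not go-redis code):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// stateHolder is a simplified stand-in for clusterStateHolder: it caches a
// value in an atomic.Value and, when nothing is cached yet, loads one on
// demand instead of returning a previously recorded error.
type stateHolder struct {
	load  func() (string, error)
	state atomic.Value
}

func (h *stateHolder) Reload() (string, error) {
	s, err := h.load()
	if err != nil {
		return "", err // the error is returned, but not remembered
	}
	h.state.Store(s)
	return s, nil
}

func (h *stateHolder) Get() (string, error) {
	if v := h.state.Load(); v != nil {
		return v.(string), nil
	}
	// No cached state yet: try to load one now rather than failing with a
	// stale "cluster has no state" error.
	return h.Reload()
}

func main() {
	calls := 0
	h := &stateHolder{load: func() (string, error) {
		calls++
		if calls == 1 {
			return "", errors.New("cluster unavailable") // first attempt fails
		}
		return "slots", nil // later attempts succeed
	}}

	_, err := h.Get() // fails, but nothing sticky is recorded
	fmt.Println("first Get:", err)

	s, err := h.Get() // retries the load and recovers
	fmt.Println("second Get:", s, err)
}
```

The same idea applies to clusterState in the hunks below: a failed load is no longer remembered, so a later Get can succeed once the cluster becomes reachable.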


@@ -3,7 +3,6 @@ package redis
 import (
 	"context"
 	"crypto/tls"
-	"errors"
 	"fmt"
 	"math"
 	"math/rand"
@ -595,10 +594,6 @@ type clusterStateHolder struct {
load func() (*clusterState, error) load func() (*clusterState, error)
state atomic.Value state atomic.Value
firstErrMu sync.RWMutex
firstErr error
reloading uint32 // atomic reloading uint32 // atomic
} }
@@ -609,21 +604,8 @@ func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder
 }
 
 func (c *clusterStateHolder) Reload() (*clusterState, error) {
-	state, err := c.reload()
-	if err != nil {
-		return nil, err
-	}
-	return state, nil
-}
-
-func (c *clusterStateHolder) reload() (*clusterState, error) {
 	state, err := c.load()
 	if err != nil {
-		c.firstErrMu.Lock()
-		if c.firstErr == nil {
-			c.firstErr = err
-		}
-		c.firstErrMu.Unlock()
 		return nil, err
 	}
 	c.state.Store(state)
@@ -637,7 +619,7 @@ func (c *clusterStateHolder) LazyReload() {
 	go func() {
 		defer atomic.StoreUint32(&c.reloading, 0)
 
-		_, err := c.reload()
+		_, err := c.Reload()
 		if err != nil {
 			return
 		}
@@ -654,15 +636,7 @@ func (c *clusterStateHolder) Get() (*clusterState, error) {
 		}
 		return state, nil
 	}
-
-	c.firstErrMu.RLock()
-	err := c.firstErr
-	c.firstErrMu.RUnlock()
-	if err != nil {
-		return nil, err
-	}
-
-	return nil, errors.New("redis: cluster has no state")
+	return c.Reload()
 }
 
 func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
@@ -710,10 +684,6 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 	c.processTxPipeline = c.defaultProcessTxPipeline
 
 	c.init()
-
-	_, _ = c.state.Reload()
-	_, _ = c.cmdsInfoCache.Get()
-
 	if opt.IdleCheckFrequency > 0 {
 		go c.reaper(opt.IdleCheckFrequency)
 	}
@@ -721,17 +691,17 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 	return c
 }
 
-// ReloadState reloads cluster state. It calls ClusterSlots func
+func (c *ClusterClient) init() {
+	c.cmdable.setProcessor(c.Process)
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
 // to get cluster slots information.
 func (c *ClusterClient) ReloadState() error {
 	_, err := c.state.Reload()
 	return err
 }
 
-func (c *ClusterClient) init() {
-	c.cmdable.setProcessor(c.Process)
-}
-
 func (c *ClusterClient) Context() context.Context {
 	if c.ctx != nil {
 		return c.ctx
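
With the eager c.state.Reload() and c.cmdsInfoCache.Get() calls removed from NewClusterClient, constructing a client no longer depends on a reachable cluster; the state is loaded by the first command via clusterStateHolder.Get. A rough usage sketch under that assumption (addresses are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Construction succeeds even if the cluster is currently unreachable.
	client := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"},
	})
	defer client.Close()

	// The first command triggers the state load (Get -> Reload). If the
	// cluster is still unavailable the command fails, but later commands
	// keep retrying the load, so the client recovers once the cluster does.
	if err := client.Ping().Err(); err != nil {
		fmt.Println("ping failed:", err)
	}
}
```

The new "ClusterClient with unavailable Cluster" test below exercises exactly this recovery path.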


@@ -48,9 +48,14 @@ func (s *clusterScenario) addrs() []string {
 	return addrs
 }
 
-func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient {
+func (s *clusterScenario) clusterClientUnsafe(opt *redis.ClusterOptions) *redis.ClusterClient {
 	opt.Addrs = s.addrs()
-	client := redis.NewClusterClient(opt)
+	return redis.NewClusterClient(opt)
+}
+
+func (s *clusterScenario) clusterClient(opt *redis.ClusterOptions) *redis.ClusterClient {
+	client := s.clusterClientUnsafe(opt)
 	err := eventually(func() error {
 		if opt.ClusterSlots != nil {
@@ -932,6 +937,36 @@ var _ = Describe("ClusterClient without valid nodes", func() {
 	})
 })
 
+var _ = Describe("ClusterClient with unavailable Cluster", func() {
+	var client *redis.ClusterClient
+
+	BeforeEach(func() {
+		for _, node := range cluster.clients {
+			err := node.ClientPause(5 * time.Second).Err()
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		opt := redisClusterOptions()
+		opt.ReadTimeout = 250 * time.Millisecond
+		opt.WriteTimeout = 250 * time.Millisecond
+		opt.MaxRedirects = 1
+		client = cluster.clusterClientUnsafe(opt)
+	})
+
+	AfterEach(func() {
+		Expect(client.Close()).NotTo(HaveOccurred())
+	})
+
+	It("recovers when Cluster recovers", func() {
+		err := client.Ping().Err()
+		Expect(err).To(HaveOccurred())
+
+		Eventually(func() error {
+			return client.Ping().Err()
+		}, "30s").ShouldNot(HaveOccurred())
+	})
+})
+
 var _ = Describe("ClusterClient timeout", func() {
 	var client *redis.ClusterClient
@@ -976,7 +1011,7 @@ var _ = Describe("ClusterClient timeout", func() {
 		})
 	}
 
-	const pause = 3 * time.Second
+	const pause = 5 * time.Second
 
 	Context("read/write timeout", func() {
 		BeforeEach(func() {