mirror of https://github.com/go-redis/redis.git
Merge branch 'master' of github.com:go-redis/redis
commit c44a9aa16f

@@ -5,18 +5,14 @@ services:
  - redis-server

go:
  - 1.12.x
  - 1.13.x
  - 1.14.x
  - 1.15.x
  - tip

matrix:
  allow_failures:
    - go: tip

env:
  - GO111MODULE=on

go_import_path: github.com/go-redis/redis

before_install:

@@ -47,7 +47,7 @@ func (s *clusterScenario) addrs() []string {
	return addrs
}

-func (s *clusterScenario) newClusterClientUnsafe(opt *redis.ClusterOptions) *redis.ClusterClient {
+func (s *clusterScenario) newClusterClientUnstable(opt *redis.ClusterOptions) *redis.ClusterClient {
	opt.Addrs = s.addrs()
	return redis.NewClusterClient(opt)
}

@@ -55,7 +55,7 @@ func (s *clusterScenario) newClusterClientUnsafe(opt *redis.ClusterOptions) *redis.ClusterClient {
func (s *clusterScenario) newClusterClient(
	ctx context.Context, opt *redis.ClusterOptions,
) *redis.ClusterClient {
-	client := s.newClusterClientUnsafe(opt)
+	client := s.newClusterClientUnstable(opt)

	err := eventually(func() error {
		if opt.ClusterSlots != nil {
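
The rename only touches the helper's name, but the two-tier pattern behind it is worth spelling out: the Unstable constructor hands back a client immediately, while newClusterClient layers a readiness wait on top. A minimal standalone sketch of that pattern, assuming go-redis v8; the addresses, timings, and polling loop are illustrative stand-ins for the suite's eventually helper, not the suite's actual code:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

// newClusterClientUnstable mirrors the renamed helper: build the client and
// hand it back immediately, whether or not the cluster is usable yet.
func newClusterClientUnstable(addrs []string) *redis.ClusterClient {
	return redis.NewClusterClient(&redis.ClusterOptions{Addrs: addrs})
}

// newClusterClient adds the readiness wait: poll PING until the cluster
// answers or the deadline passes. The 30s/250ms values are illustrative.
func newClusterClient(ctx context.Context, addrs []string) (*redis.ClusterClient, error) {
	client := newClusterClientUnstable(addrs)
	deadline := time.Now().Add(30 * time.Second)
	for time.Now().Before(deadline) {
		if err := client.Ping(ctx).Err(); err == nil {
			return client, nil
		}
		time.Sleep(250 * time.Millisecond)
	}
	_ = client.Close()
	return nil, fmt.Errorf("cluster did not become ready in time")
}

func main() {
	ctx := context.Background()
	client, err := newClusterClient(ctx, []string{"127.0.0.1:7000", "127.0.0.1:7001"})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer client.Close()
	fmt.Println("ping:", client.Ping(ctx).Val())
}
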
@@ -873,59 +873,6 @@ var _ = Describe("ClusterClient", func() {
		assertClusterClient()
	})

-	Describe("ClusterClient failover", func() {
-		BeforeEach(func() {
-			failover = true
-
-			opt = redisClusterOptions()
-			opt.MinRetryBackoff = 250 * time.Millisecond
-			opt.MaxRetryBackoff = time.Second
-			client = cluster.newClusterClient(ctx, opt)
-
-			err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
-				return master.FlushDB(ctx).Err()
-			})
-			Expect(err).NotTo(HaveOccurred())
-
-			err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
-				defer GinkgoRecover()
-
-				Eventually(func() int64 {
-					return slave.DBSize(ctx).Val()
-				}, "30s").Should(Equal(int64(0)))
-
-				return nil
-			})
-			Expect(err).NotTo(HaveOccurred())
-
-			state, err := client.LoadState(ctx)
-			Eventually(func() bool {
-				state, err = client.LoadState(ctx)
-				if err != nil {
-					return false
-				}
-				return state.IsConsistent(ctx)
-			}, "30s").Should(BeTrue())
-
-			for _, slave := range state.Slaves {
-				err = slave.Client.ClusterFailover(ctx).Err()
-				Expect(err).NotTo(HaveOccurred())
-
-				Eventually(func() bool {
-					state, _ := client.LoadState(ctx)
-					return state.IsConsistent(ctx)
-				}, "30s").Should(BeTrue())
-			}
-		})
-
-		AfterEach(func() {
-			failover = false
-			Expect(client.Close()).NotTo(HaveOccurred())
-		})
-
-		assertClusterClient()
-	})
-
	Describe("ClusterClient with RouteByLatency", func() {
		BeforeEach(func() {
			opt = redisClusterOptions()
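
The deleted spec exercised a manual failover: trigger CLUSTER FAILOVER from each replica, then wait until the cluster state is consistent again (LoadState and IsConsistent are test-only exports, not public API). Roughly the same flow can be driven through public commands; a sketch with a placeholder replica address and an assumed cluster_state:ok readiness check:

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// Connect directly to a replica node; the address is a placeholder.
	replica := redis.NewClient(&redis.Options{Addr: "127.0.0.1:7003"})
	defer replica.Close()

	// Ask the replica to take over from its master (CLUSTER FAILOVER).
	if err := replica.ClusterFailover(ctx).Err(); err != nil {
		fmt.Println("failover:", err)
		return
	}

	// Poll CLUSTER INFO until the node reports cluster_state:ok again,
	// a rough public-API stand-in for the suite's IsConsistent check.
	for i := 0; i < 60; i++ {
		if strings.Contains(replica.ClusterInfo(ctx).Val(), "cluster_state:ok") {
			fmt.Println("cluster settled")
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
	fmt.Println("cluster did not settle in time")
}
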
@@ -1132,7 +1079,7 @@ var _ = Describe("ClusterClient with unavailable Cluster", func() {
		opt.ReadTimeout = 250 * time.Millisecond
		opt.WriteTimeout = 250 * time.Millisecond
		opt.MaxRedirects = 1
-		client = cluster.newClusterClientUnsafe(opt)
+		client = cluster.newClusterClientUnstable(opt)
	})

	AfterEach(func() {

@@ -19,6 +19,7 @@ func (c *PubSub) SetNetConn(netConn net.Conn) {
}

func (c *ClusterClient) LoadState(ctx context.Context) (*clusterState, error) {
	// return c.state.Reload(ctx)
	return c.loadState(ctx)
}
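
LoadState is an instance of the export_test.go idiom: the file belongs to the package under test but is compiled only during tests, so it can re-export unexported internals (here loadState and clusterState) to the external test package. A generic sketch of the idiom with hypothetical names:

// counter.go: a hypothetical library in package mylib.
package mylib

// Counter hides its count behind an unexported field and method.
type Counter struct{ n int }

func (c *Counter) Incr()      { c.n++ }
func (c *Counter) count() int { return c.n }

// export_test.go would sit in the same package but only be compiled during
// "go test", re-exporting count for the external mylib_test package:
//
//	package mylib
//
//	func (c *Counter) Count() int { return c.count() }
//
// Tests in package mylib_test can then call Count() while the shipped API
// keeps count unexported.
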
@@ -254,7 +254,7 @@ func newConnPool(opt *Options) *pool.ConnPool {
		)
		conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
		if err != nil {
-			internal.RecordError(ctx, err)
+			_ = internal.RecordError(ctx, err)
		}
		return err
	})
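
Prefixing the call with _ = does not change behavior; it marks the discarded return value as deliberate so errcheck-style linters stay quiet. A self-contained illustration with a stand-in recorder (recordError here is hypothetical, not go-redis's internal API):

package main

import (
	"errors"
	"fmt"
)

// recordError stands in for internal.RecordError: report the error to some
// telemetry sink and hand it back so callers may chain on it.
func recordError(err error) error {
	fmt.Println("recorded:", err)
	return err
}

func main() {
	err := errors.New("dial tcp 127.0.0.1:6379: connect: connection refused")
	// The blank assignment documents that the returned error is intentionally
	// discarded, which keeps errcheck-style linters from flagging the call.
	_ = recordError(err)
}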