Merge branch 'master' of github.com:go-redis/redis

Vladimir Mihailenco 2020-09-05 12:36:31 +03:00
commit c44a9aa16f
4 changed files with 6 additions and 62 deletions


@@ -5,18 +5,14 @@ services:
   - redis-server
 go:
-  - 1.12.x
-  - 1.13.x
   - 1.14.x
+  - 1.15.x
   - tip
 matrix:
   allow_failures:
     - go: tip
-env:
-  - GO111MODULE=on
 go_import_path: github.com/go-redis/redis
 before_install:


@@ -47,7 +47,7 @@ func (s *clusterScenario) addrs() []string {
 	return addrs
 }
 
-func (s *clusterScenario) newClusterClientUnsafe(opt *redis.ClusterOptions) *redis.ClusterClient {
+func (s *clusterScenario) newClusterClientUnstable(opt *redis.ClusterOptions) *redis.ClusterClient {
 	opt.Addrs = s.addrs()
 	return redis.NewClusterClient(opt)
 }
@@ -55,7 +55,7 @@ func (s *clusterScenario) newClusterClientUnsafe(opt *redis.ClusterOptions) *redis.ClusterClient {
 func (s *clusterScenario) newClusterClient(
 	ctx context.Context, opt *redis.ClusterOptions,
 ) *redis.ClusterClient {
-	client := s.newClusterClientUnsafe(opt)
+	client := s.newClusterClientUnstable(opt)
 	err := eventually(func() error {
 		if opt.ClusterSlots != nil {
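The renamed newClusterClientUnstable constructor returns a client immediately, while newClusterClient wraps it in an eventually(...) loop so tests only proceed once the cluster scenario is usable. A rough sketch of that kind of polling helper follows; the name, signature, and fixed backoff are hypothetical, not the test suite's actual helper.

// Rough sketch of an eventually-style polling helper; names and backoff
// are illustrative only.
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries fn until it returns nil or the timeout elapses,
// reporting the last error on failure.
func pollUntil(timeout time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	lastErr := errors.New("never attempted")
	for time.Now().Before(deadline) {
		if lastErr = fn(); lastErr == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond) // brief pause between attempts
	}
	return fmt.Errorf("condition not met within %v: %w", timeout, lastErr)
}

func main() {
	start := time.Now()
	err := pollUntil(time.Second, func() error {
		if time.Since(start) > 300*time.Millisecond { // stand-in readiness check
			return nil
		}
		return errors.New("cluster not ready yet")
	})
	fmt.Println("result:", err) // prints "result: <nil>" once the check passes
}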
@@ -873,59 +873,6 @@ var _ = Describe("ClusterClient", func() {
 		assertClusterClient()
 	})
 
-	Describe("ClusterClient failover", func() {
-		BeforeEach(func() {
-			failover = true
-
-			opt = redisClusterOptions()
-			opt.MinRetryBackoff = 250 * time.Millisecond
-			opt.MaxRetryBackoff = time.Second
-			client = cluster.newClusterClient(ctx, opt)
-
-			err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
-				return master.FlushDB(ctx).Err()
-			})
-			Expect(err).NotTo(HaveOccurred())
-
-			err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
-				defer GinkgoRecover()
-				Eventually(func() int64 {
-					return slave.DBSize(ctx).Val()
-				}, "30s").Should(Equal(int64(0)))
-				return nil
-			})
-			Expect(err).NotTo(HaveOccurred())
-
-			state, err := client.LoadState(ctx)
-			Eventually(func() bool {
-				state, err = client.LoadState(ctx)
-				if err != nil {
-					return false
-				}
-				return state.IsConsistent(ctx)
-			}, "30s").Should(BeTrue())
-
-			for _, slave := range state.Slaves {
-				err = slave.Client.ClusterFailover(ctx).Err()
-				Expect(err).NotTo(HaveOccurred())
-
-				Eventually(func() bool {
-					state, _ := client.LoadState(ctx)
-					return state.IsConsistent(ctx)
-				}, "30s").Should(BeTrue())
-			}
-		})
-
-		AfterEach(func() {
-			failover = false
-			Expect(client.Close()).NotTo(HaveOccurred())
-		})
-
-		assertClusterClient()
-	})
-
 	Describe("ClusterClient with RouteByLatency", func() {
 		BeforeEach(func() {
 			opt = redisClusterOptions()
@@ -1132,7 +1079,7 @@ var _ = Describe("ClusterClient with unavailable Cluster", func() {
 		opt.ReadTimeout = 250 * time.Millisecond
 		opt.WriteTimeout = 250 * time.Millisecond
 		opt.MaxRedirects = 1
-		client = cluster.newClusterClientUnsafe(opt)
+		client = cluster.newClusterClientUnstable(opt)
 	})
 
 	AfterEach(func() {
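For reference, the "unavailable Cluster" suite builds its client with deliberately short timeouts and a single redirect so failures surface quickly instead of hanging. A minimal standalone sketch of that configuration; the addresses are placeholders, not the test cluster's real ports.

// Minimal sketch: a ClusterClient configured the way the unavailable-cluster
// test does, so an unreachable cluster fails fast.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	opt := &redis.ClusterOptions{
		Addrs:        []string{":16600", ":16601", ":16602"}, // placeholder addresses
		ReadTimeout:  250 * time.Millisecond,
		WriteTimeout: 250 * time.Millisecond,
		MaxRedirects: 1,
	}
	client := redis.NewClusterClient(opt)
	defer client.Close()

	// With an unreachable cluster this is expected to fail quickly rather
	// than hang, thanks to the short timeouts above.
	if err := client.Ping(context.Background()).Err(); err != nil {
		fmt.Println("ping failed:", err)
	}
}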


@@ -19,6 +19,7 @@ func (c *PubSub) SetNetConn(netConn net.Conn) {
 }
 
 func (c *ClusterClient) LoadState(ctx context.Context) (*clusterState, error) {
+	// return c.state.Reload(ctx)
 	return c.loadState(ctx)
 }
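LoadState (like SetNetConn above) lives in an export_test.go-style file: it belongs to the package under test, compiles only during "go test", and hands an unexported helper to the external test package. A generic sketch of that pattern, with hypothetical package and function names rather than go-redis's actual internals:

// Illustrative sketch of the export_test.go technique; package and
// identifier names are hypothetical.
package cache

// loadState stands in for an unexported helper that regular callers
// should not reach; in go-redis this is the cluster-state loader.
func loadState() string { return "ok" }

// LoadState re-exports the helper for tests. In real code this wrapper
// would sit in export_test.go, so it is compiled only under "go test"
// and never ships in the public API.
func LoadState() string { return loadState() }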


@@ -254,7 +254,7 @@ func newConnPool(opt *Options) *pool.ConnPool {
 			)
 			conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
 			if err != nil {
-				internal.RecordError(ctx, err)
+				_ = internal.RecordError(ctx, err)
 			}
 			return err
 		})
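The error recorded here comes from the dialer that go-redis invokes as opt.Dialer(ctx, opt.Network, opt.Addr). A minimal sketch of plugging a custom Dialer into the v8 client follows; the logging is our own addition (not library behavior) and the address is a placeholder.

// Minimal sketch: supplying a custom Dialer via redis.Options. The pool
// calls it with (ctx, network, addr) exactly as the diff above shows and
// propagates any error it returns.
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr: "localhost:6379", // placeholder address
		Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
			d := net.Dialer{Timeout: 5 * time.Second}
			conn, err := d.DialContext(ctx, network, addr)
			if err != nil {
				// Surface dial failures in our own logs; the connection
				// pool still sees and returns this error.
				log.Printf("redis dial %s %s failed: %v", network, addr, err)
			}
			return conn, err
		},
	})
	defer rdb.Close()

	if err := rdb.Ping(context.Background()).Err(); err != nil {
		log.Println("ping:", err)
	}
}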