From 74fdad907cf197d65556a152273c3157c5ff3c48 Mon Sep 17 00:00:00 2001
From: Ben Keith
Date: Fri, 11 Dec 2020 11:21:25 -0500
Subject: [PATCH] ClusterSlots Config and ReadOnly

The ReadOnly flag on ClusterOptions must be set for the client to fall
back to non-master nodes on reads. In non-clustered setups that place
multiple nodes in a single ClusterSlot, this made the non-master node
useless: with ReadOnly=false the client never routed requests to it,
and with ReadOnly=true the client issued a READONLY command against it,
which would fail. See the comment in the code for more details.
---
 cluster.go      | 10 ++++++---
 cluster_test.go | 60 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 67 insertions(+), 3 deletions(-)

diff --git a/cluster.go b/cluster.go
index 3807265..c40e18d 100644
--- a/cluster.go
+++ b/cluster.go
@@ -86,7 +86,7 @@ func (opt *ClusterOptions) init() {
 		opt.MaxRedirects = 3
 	}
 
-	if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+	if opt.RouteByLatency || opt.RouteRandomly {
 		opt.ReadOnly = true
 	}
 
@@ -153,9 +153,13 @@ func (opt *ClusterOptions) clientOptions() *Options {
 		IdleTimeout:        opt.IdleTimeout,
 		IdleCheckFrequency: disableIdleCheck,
 
-		readOnly: opt.ReadOnly,
-
 		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config). This means we cannot execute the
+		// READONLY command against that node -- setting readOnly to false in such
+		// situations in the options below will prevent that from happening.
+		readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
 	}
 }
 
diff --git a/cluster_test.go b/cluster_test.go
index 64acecf..27d2a1c 100644
--- a/cluster_test.go
+++ b/cluster_test.go
@@ -1020,6 +1020,66 @@ var _ = Describe("ClusterClient", func() {
 
 		assertClusterClient()
 	})
+
+	Describe("ClusterClient with ClusterSlots with multiple nodes per slot", func() {
+		BeforeEach(func() {
+			failover = true
+
+			opt = redisClusterOptions()
+			opt.ReadOnly = true
+			opt.ClusterSlots = func(ctx context.Context) ([]redis.ClusterSlot, error) {
+				slots := []redis.ClusterSlot{{
+					Start: 0,
+					End:   4999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":8220",
+					}, {
+						Addr: ":8223",
+					}},
+				}, {
+					Start: 5000,
+					End:   9999,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":8221",
+					}, {
+						Addr: ":8224",
+					}},
+				}, {
+					Start: 10000,
+					End:   16383,
+					Nodes: []redis.ClusterNode{{
+						Addr: ":8222",
+					}, {
+						Addr: ":8225",
+					}},
+				}}
+				return slots, nil
+			}
+			client = cluster.newClusterClient(ctx, opt)
+
+			err := client.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
+				return master.FlushDB(ctx).Err()
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+			err = client.ForEachSlave(ctx, func(ctx context.Context, slave *redis.Client) error {
+				Eventually(func() int64 {
+					return client.DBSize(ctx).Val()
+				}, 30*time.Second).Should(Equal(int64(0)))
+				return nil
+			})
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		AfterEach(func() {
+			failover = false
+
+			err := client.Close()
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		assertClusterClient()
+	})
 })
 
 var _ = Describe("ClusterClient without nodes", func() {
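
For reference, a minimal sketch of how a caller might combine ClusterSlots with
ReadOnly after this patch. This is an illustration only, not part of the change:
the import path (github.com/go-redis/redis/v8), the node addresses, and the
single full-range slot are assumptions.

package main

import (
	"context"

	"github.com/go-redis/redis/v8" // assumed import path for this client
)

func main() {
	ctx := context.Background()

	client := redis.NewClusterClient(&redis.ClusterOptions{
		// ReadOnly must be set so reads can be routed to non-master nodes.
		ReadOnly: true,

		// Spread reads across the nodes of each slot; per the init() change
		// above, this still forces ReadOnly on even when ClusterSlots is set.
		RouteRandomly: true,

		// Static slot map for a non-clustered primary/replica pair.
		// The addresses and the single full-range slot are placeholders.
		ClusterSlots: func(ctx context.Context) ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{{
				Start: 0,
				End:   16383,
				Nodes: []redis.ClusterNode{
					{Addr: ":6379"}, // master
					{Addr: ":6380"}, // replica
				},
			}}, nil
		},
	})
	defer client.Close()

	// With this patch the client no longer sends READONLY to these nodes
	// (the per-node readOnly option stays false in clientOptions when
	// ClusterSlots is set), so reads can be served by the replica without
	// failing.
	if err := client.Get(ctx, "some-key").Err(); err != nil && err != redis.Nil {
		panic(err)
	}
}

The key point is the clientOptions change: even when ReadOnly is requested,
nodes defined through ClusterSlots are treated as non-clustered and are never
sent the READONLY command.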