mirror of https://github.com/go-redis/redis.git
Select random node when there are no keys.
parent 5aae583e0c
commit eeba1d7db1
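In plain terms, the commit routes a command by the position of its first key and falls back to a randomly selected node or shard when no key exists. Below is a minimal, self-contained sketch of that routing rule; only the firstKeyPos logic mirrors cmdFirstKeyPos from the diff, while commandInfo, routeKey, and the node list are illustrative assumptions rather than go-redis API.

// keyroute_sketch.go: standalone sketch, not part of go-redis.
package main

import (
	"fmt"
	"math/rand"
)

// commandInfo stands in for the FirstKeyPos metadata Redis reports via COMMAND.
type commandInfo struct {
	FirstKeyPos int
}

// firstKeyPos follows the logic added in command.go: EVAL/EVALSHA use argument 3
// as the first key when numkeys is not "0" (and position 0 otherwise); other
// commands take the position from metadata, or -1 when metadata is missing.
func firstKeyPos(args []string, info *commandInfo) int {
	switch args[0] {
	case "eval", "evalsha":
		if len(args) > 2 && args[2] != "0" {
			return 3
		}
		return 0
	}
	if info == nil {
		return -1
	}
	return info.FirstKeyPos
}

// routeKey returns the argument at the first-key position, or "" when the
// command is keyless and should be sent to a random node.
func routeKey(args []string, info *commandInfo) string {
	pos := firstKeyPos(args, info)
	if pos < 0 || pos >= len(args) {
		return ""
	}
	return args[pos]
}

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	infos := map[string]*commandInfo{"get": {FirstKeyPos: 1}}

	cmds := [][]string{
		{"get", "user:42"},                    // keyed: routed by "user:42"
		{"ping"},                              // no metadata, no key: random node
		{"eval", "return 1", "1", "some-key"}, // numkeys=1: routed by "some-key"
		{"eval", "return 1", "0"},             // numkeys=0: position 0, routed by the literal "eval"
	}

	for _, args := range cmds {
		if key := routeKey(args, infos[args[0]]); key != "" {
			fmt.Printf("%-34v -> routed by key %q\n", args, key)
		} else {
			fmt.Printf("%-34v -> random node %s\n", args, nodes[rand.Intn(len(nodes))])
		}
	}
}

In the cluster client below, the keyless case additionally returns slot -1 so that Process only consults the slot's master node when slot >= 0.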

cluster.go (156 lines changed)
@@ -12,6 +12,72 @@ import (
 	"gopkg.in/redis.v4/internal/pool"
 )
 
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+	// A seed list of host:port addresses of cluster nodes.
+	Addrs []string
+
+	// The maximum number of retries before giving up. Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 16.
+	MaxRedirects int
+
+	// Enables read queries for a connection to a Redis Cluster slave node.
+	ReadOnly bool
+
+	// Enables routing read-only queries to the closest master or slave node.
+	RouteByLatency bool
+
+	// Following options are copied from Options struct.
+
+	Password string
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolSize applies per cluster node and not for the whole cluster.
+	PoolSize           int
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+func (opt *ClusterOptions) init() {
+	if opt.MaxRedirects == -1 {
+		opt.MaxRedirects = 0
+	} else if opt.MaxRedirects == 0 {
+		opt.MaxRedirects = 16
+	}
+
+	if opt.RouteByLatency {
+		opt.ReadOnly = true
+	}
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+	const disableIdleCheck = -1
+
+	return &Options{
+		Password: opt.Password,
+		ReadOnly: opt.ReadOnly,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolSize:    opt.PoolSize,
+		PoolTimeout: opt.PoolTimeout,
+		IdleTimeout: opt.IdleTimeout,
+
+		// IdleCheckFrequency is not copied to disable reaper
+		IdleCheckFrequency: disableIdleCheck,
+	}
+}
+
+//------------------------------------------------------------------------------
+
 type clusterNode struct {
 	Client  *Client
 	Latency time.Duration

@@ -36,8 +102,8 @@ type ClusterClient struct {
 	slots  [][]*clusterNode
 	closed bool
 
-	cmdsInfo     map[string]*CommandInfo
 	cmdsInfoOnce *sync.Once
+	cmdsInfo     map[string]*CommandInfo
 
 	// Reports where slots reloading is in progress.
 	reloading uint32

@@ -81,19 +147,22 @@ func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
 		}
 		c.cmdsInfoOnce = &sync.Once{}
 	})
+	if c.cmdsInfo == nil {
+		return nil
+	}
 	return c.cmdsInfo[name]
 }
 
 func (c *ClusterClient) getNodes() map[string]*clusterNode {
 	var nodes map[string]*clusterNode
-	c.mu.RLock()
 	if !c.closed {
 		nodes = make(map[string]*clusterNode, len(c.nodes))
+		c.mu.RLock()
 		for addr, node := range c.nodes {
 			nodes[addr] = node
 		}
-	}
 		c.mu.RUnlock()
+	}
 	return nodes
 }
 

@@ -257,18 +326,11 @@ func (c *ClusterClient) slotClosestNode(slot int) (*clusterNode, error) {
 
 func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) {
 	cmdInfo := c.cmdInfo(cmd.arg(0))
-	if cmdInfo == nil {
-		internal.Logf("info for cmd=%s not found", cmd.arg(0))
+	firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+	if firstKey == "" {
 		node, err := c.randomNode()
-		return 0, node, err
+		return -1, node, err
 	}
-
-	if cmdInfo.FirstKeyPos == -1 {
-		node, err := c.randomNode()
-		return 0, node, err
-	}
-
-	firstKey := cmd.arg(int(cmdInfo.FirstKeyPos))
 	slot := hashtag.Slot(firstKey)
 
 	if cmdInfo.ReadOnly && c.opt.ReadOnly {

@@ -330,10 +392,12 @@ func (c *ClusterClient) Process(cmd Cmder) error {
 		var addr string
 		moved, ask, addr = errors.IsMoved(err)
 		if moved || ask {
+			if slot >= 0 {
 				master, _ := c.slotMasterNode(slot)
 				if moved && (master == nil || master.Client.getAddr() != addr) {
 					c.lazyReloadSlots()
 				}
+			}
 
 			node, err = c.nodeByAddr(addr)
 			if err != nil {

@@ -609,69 +673,3 @@ func (c *ClusterClient) execClusterCmds(
 
 	return failedCmds, retErr
 }
-
-//------------------------------------------------------------------------------
-
-// ClusterOptions are used to configure a cluster client and should be
-// passed to NewClusterClient.
-type ClusterOptions struct {
-	// A seed list of host:port addresses of cluster nodes.
-	Addrs []string
-
-	// The maximum number of retries before giving up. Command is retried
-	// on network errors and MOVED/ASK redirects.
-	// Default is 16.
-	MaxRedirects int
-
-	// Enables read queries for a connection to a Redis Cluster slave node.
-	ReadOnly bool
-
-	// Enables routing read-only queries to the closest master or slave node.
-	RouteByLatency bool
-
-	// Following options are copied from Options struct.
-
-	Password string
-
-	DialTimeout  time.Duration
-	ReadTimeout  time.Duration
-	WriteTimeout time.Duration
-
-	// PoolSize applies per cluster node and not for the whole cluster.
-	PoolSize           int
-	PoolTimeout        time.Duration
-	IdleTimeout        time.Duration
-	IdleCheckFrequency time.Duration
-}
-
-func (opt *ClusterOptions) init() {
-	if opt.MaxRedirects == -1 {
-		opt.MaxRedirects = 0
-	} else if opt.MaxRedirects == 0 {
-		opt.MaxRedirects = 16
-	}
-
-	if opt.RouteByLatency {
-		opt.ReadOnly = true
-	}
-}
-
-func (opt *ClusterOptions) clientOptions() *Options {
-	const disableIdleCheck = -1
-
-	return &Options{
-		Password: opt.Password,
-		ReadOnly: opt.ReadOnly,
-
-		DialTimeout:  opt.DialTimeout,
-		ReadTimeout:  opt.ReadTimeout,
-		WriteTimeout: opt.WriteTimeout,
-
-		PoolSize:    opt.PoolSize,
-		PoolTimeout: opt.PoolTimeout,
-		IdleTimeout: opt.IdleTimeout,
-
-		// IdleCheckFrequency is not copied to disable reaper
-		IdleCheckFrequency: disableIdleCheck,
-	}
-}

command.go (27 lines changed)
@@ -7,6 +7,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/go-redis/redis/internal"
+
 	"gopkg.in/redis.v4/internal/pool"
 	"gopkg.in/redis.v4/internal/proto"
 )

@@ -88,6 +90,22 @@ func cmdString(cmd Cmder, val interface{}) string {
 
 }
 
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+	switch cmd.arg(0) {
+	case "eval", "evalsha":
+		if cmd.arg(2) != "0" {
+			return 3
+		} else {
+			return 0
+		}
+	}
+	if info == nil {
+		internal.Logf("info for cmd=%s not found", cmd.arg(0))
+		return -1
+	}
+	return int(info.FirstKeyPos)
+}
+
 //------------------------------------------------------------------------------
 
 type baseCmd struct {

@@ -109,12 +127,11 @@ func (cmd *baseCmd) args() []interface{} {
 }
 
 func (cmd *baseCmd) arg(pos int) string {
-	if len(cmd._args) > pos {
-		if s, ok := cmd._args[pos].(string); ok {
-			return s
-		}
-	}
-	return ""
+	if pos < 0 || pos >= len(cmd._args) {
+		return ""
+	}
+	s, _ := cmd._args[pos].(string)
+	return s
 }
 
 func (cmd *baseCmd) readTimeout() *time.Duration {
@@ -138,7 +138,6 @@ func redisRingOptions() *redis.RingOptions {
 		PoolTimeout:        30 * time.Second,
 		IdleTimeout:        500 * time.Millisecond,
 		IdleCheckFrequency: 500 * time.Millisecond,
-		RouteByEvalKeys:    true,
 	}
 }
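The ring.go changes below apply the same fallback to the sharded Ring client: a command with no key is hashed under a random integer, so keyless commands spread across shards instead of always landing on one. As a rough, assumed illustration of that call pattern (pickShard and shardForCmd are invented stand-ins; go-redis actually uses the consistenthash package via shardByKey), consider:

// ringshard_sketch.go: toy illustration only, not go-redis code.
package main

import (
	"fmt"
	"hash/crc32"
	"math/rand"
	"sort"
	"strconv"
)

// pickShard is a stand-in for a consistent-hash lookup: it hashes the key and
// maps it onto the sorted shard list; only the call pattern matters here.
func pickShard(shards []string, key string) string {
	if len(shards) == 0 {
		return ""
	}
	sort.Strings(shards)
	sum := crc32.ChecksumIEEE([]byte(key))
	return shards[int(sum)%len(shards)]
}

// shardForCmd routes by the first key when there is one, and otherwise by a
// random key, mirroring shardByKey(strconv.Itoa(rand.Int())) in the diff.
func shardForCmd(shards []string, firstKey string) string {
	if firstKey == "" {
		firstKey = strconv.Itoa(rand.Int())
	}
	return pickShard(shards, firstKey)
}

func main() {
	shards := []string{"shard-1", "shard-2", "shard-3"}

	fmt.Println("GET user:42 ->", shardForCmd(shards, "user:42")) // keyed: stable shard
	for i := 0; i < 3; i++ {
		fmt.Println("PING        ->", shardForCmd(shards, "")) // keyless: random shard
	}
}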

ring.go (101 lines changed)
@@ -3,6 +3,8 @@ package redis
 import (
 	"errors"
 	"fmt"
+	"math/rand"
+	"strconv"
 	"sync"
 	"sync/atomic"
 	"time"

@@ -40,9 +42,6 @@ type RingOptions struct {
 	PoolTimeout        time.Duration
 	IdleTimeout        time.Duration
 	IdleCheckFrequency time.Duration
-
-	// RouteByEvalKeys flag to enable eval and evalsha key position parsing for sharding
-	RouteByEvalKeys bool
 }
 
 func (opt *RingOptions) init() {

@@ -131,12 +130,10 @@ type Ring struct {
 	hash   *consistenthash.Map
 	shards map[string]*ringShard
 
-	cmdsInfo     map[string]*CommandInfo
 	cmdsInfoOnce *sync.Once
+	cmdsInfo     map[string]*CommandInfo
 
 	closed bool
-
-	routeByEvalKeys bool
 }
 
 var _ Cmdable = (*Ring)(nil)

@@ -159,7 +156,6 @@ func NewRing(opt *RingOptions) *Ring {
 		clopt.Addr = addr
 		ring.addClient(name, NewClient(clopt))
 	}
-	ring.routeByEvalKeys = opt.RouteByEvalKeys
 	go ring.heartbeat()
 	return ring
 }

@@ -227,30 +223,6 @@ func (c *Ring) cmdInfo(name string) *CommandInfo {
 	return c.cmdsInfo[name]
 }
 
-func (c *Ring) getEvalFirstKey(cmd Cmder) string {
-	if c.routeByEvalKeys && cmd.arg(2) != "0" {
-		return cmd.arg(3)
-	} else {
-		return cmd.arg(0)
-	}
-}
-
-func (c *Ring) cmdFirstKey(cmd Cmder) string {
-	switch cmd.arg(0) {
-	case "eval":
-		return c.getEvalFirstKey(cmd)
-	case "evalsha":
-		return c.getEvalFirstKey(cmd)
-	}
-
-	cmdInfo := c.cmdInfo(cmd.arg(0))
-	if cmdInfo == nil {
-		internal.Logf("info for cmd=%s not found", cmd.arg(0))
-		return ""
-	}
-	return cmd.arg(int(cmdInfo.FirstKeyPos))
-}
-
 func (c *Ring) addClient(name string, cl *Client) {
 	c.mu.Lock()
 	c.hash.Add(name)

@@ -258,14 +230,17 @@ func (c *Ring) addClient(name string, cl *Client) {
 	c.mu.Unlock()
 }
 
-func (c *Ring) getClient(key string) (*Client, error) {
+func (c *Ring) shardByKey(key string) (*Client, error) {
+	key = hashtag.Key(key)
+
 	c.mu.RLock()
 
 	if c.closed {
+		c.mu.RUnlock()
 		return nil, pool.ErrClosed
 	}
 
-	name := c.hash.Get(hashtag.Key(key))
+	name := c.hash.Get(key)
 	if name == "" {
 		c.mu.RUnlock()
 		return nil, errRingShardsDown

@@ -276,8 +251,32 @@ func (c *Ring) getClient(key string) (*Client, error) {
 	return cl, nil
 }
 
+func (c *Ring) randomShard() (*Client, error) {
+	return c.shardByKey(strconv.Itoa(rand.Int()))
+}
+
+func (c *Ring) shardByName(name string) (*Client, error) {
+	if name == "" {
+		return c.randomShard()
+	}
+
+	c.mu.RLock()
+	cl := c.shards[name].Client
+	c.mu.RUnlock()
+	return cl, nil
+}
+
+func (c *Ring) cmdShard(cmd Cmder) (*Client, error) {
+	cmdInfo := c.cmdInfo(cmd.arg(0))
+	firstKey := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+	if firstKey == "" {
+		return c.randomShard()
+	}
+	return c.shardByKey(firstKey)
+}
+
 func (c *Ring) Process(cmd Cmder) error {
-	cl, err := c.getClient(c.cmdFirstKey(cmd))
+	cl, err := c.cmdShard(cmd)
 	if err != nil {
 		cmd.setErr(err)
 		return err

@@ -285,17 +284,18 @@ func (c *Ring) Process(cmd Cmder) error {
 	return cl.baseClient.Process(cmd)
 }
 
-// rebalance removes dead shards from the c.
+// rebalance removes dead shards from the Ring.
 func (c *Ring) rebalance() {
-	defer c.mu.Unlock()
-	c.mu.Lock()
-
-	c.hash = consistenthash.New(c.nreplicas, nil)
+	hash := consistenthash.New(c.nreplicas, nil)
 	for name, shard := range c.shards {
 		if shard.IsUp() {
-			c.hash.Add(name)
+			hash.Add(name)
 		}
 	}
+
+	c.mu.Lock()
+	c.hash = hash
+	c.mu.Unlock()
 }
 
 // heartbeat monitors state of each shard in the ring.

@@ -370,13 +370,10 @@ func (c *Ring) pipelineExec(cmds []Cmder) error {
 
 	cmdsMap := make(map[string][]Cmder)
 	for _, cmd := range cmds {
-		name := c.hash.Get(hashtag.Key(c.cmdFirstKey(cmd)))
-		if name == "" {
-			cmd.setErr(errRingShardsDown)
-			if retErr == nil {
-				retErr = errRingShardsDown
-			}
-			continue
+		cmdInfo := c.cmdInfo(cmd.arg(0))
+		name := cmd.arg(cmdFirstKeyPos(cmd, cmdInfo))
+		if name != "" {
+			name = c.hash.Get(hashtag.Key(name))
 		}
 		cmdsMap[name] = append(cmdsMap[name], cmd)
 	}

@@ -385,7 +382,15 @@ func (c *Ring) pipelineExec(cmds []Cmder) error {
 	failedCmdsMap := make(map[string][]Cmder)
 
 	for name, cmds := range cmdsMap {
-		client := c.shards[name].Client
+		client, err := c.shardByName(name)
+		if err != nil {
+			setCmdsErr(cmds, err)
+			if retErr == nil {
+				retErr = err
+			}
+			continue
+		}
+
 		cn, _, err := client.conn()
 		if err != nil {
 			setCmdsErr(cmds, err)