forked from mirror/redis

Merge pull request #331 from go-redis/feature/for-each-master

Add ForEachMaster API.

commit 1d69f3f701
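ForEachMaster concurrently runs a callback against every master node in the cluster and returns the first error, if any. A minimal usage sketch based on the test added in this commit; the import path and node addresses are assumptions, not part of the commit:

package main

import (
    "log"

    "gopkg.in/redis.v4" // v4 import path assumed; adjust to your setup
)

func main() {
    client := redis.NewClusterClient(&redis.ClusterOptions{
        Addrs: []string{":7000", ":7001", ":7002"}, // illustrative addresses
    })
    defer client.Close()

    // Flush every master; ForEachMaster returns the first error reported.
    err := client.ForEachMaster(func(master *redis.Client) error {
        return master.FlushDb().Err()
    })
    if err != nil {
        log.Fatal(err)
    }
}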
cluster.go (121 changed lines)

@@ -13,8 +13,8 @@ import (
 
 type clusterNode struct {
     Addr   string
-    Latency int
     Client *Client
+    Latency time.Duration
 }
 
 // ClusterClient is a Redis Cluster client representing a pool of zero
@@ -73,8 +73,8 @@ func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
 }
 
 func (c *ClusterClient) getNodes() map[string]*clusterNode {
-    c.mu.RLock()
     var nodes map[string]*clusterNode
+    c.mu.RLock()
     if !c.closed {
         nodes = make(map[string]*clusterNode, len(c.nodes))
         for addr, node := range c.nodes {
@@ -95,7 +95,7 @@ func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
 
 // PoolStats returns accumulated connection pool stats.
 func (c *ClusterClient) PoolStats() *PoolStats {
-    acc := PoolStats{}
+    var acc PoolStats
     for _, node := range c.getNodes() {
         s := node.Client.connPool.Stats()
         acc.Requests += s.Requests
@@ -214,7 +214,6 @@ func (c *ClusterClient) slotSlaveNode(slot int) (*clusterNode, error) {
         n := rand.Intn(len(nodes)-1) + 1
         return nodes[n], nil
     }
-
 }
 
 func (c *ClusterClient) slotClosestNode(slot int) (*clusterNode, error) {
@@ -261,19 +260,19 @@ func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) {
     return slot, node, err
 }
 
-func (c *ClusterClient) Process(cmd Cmder) {
-    var ask bool
+func (c *ClusterClient) Process(cmd Cmder) error {
     slot, node, err := c.cmdSlotAndNode(cmd)
+    if err != nil {
+        cmd.setErr(err)
+        return err
+    }
+
+    var ask bool
     for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
         if attempt > 0 {
             cmd.reset()
         }
 
-        if err != nil {
-            cmd.setErr(err)
-            return
-        }
-
         if ask {
             pipe := node.Client.Pipeline()
             pipe.Process(NewCmd("ASKING"))
@@ -288,7 +287,7 @@ func (c *ClusterClient) Process(cmd Cmder) {
         // If there is no (real) error, we are done!
         err := cmd.Err()
         if err == nil {
-            return
+            return nil
         }
 
         // On network errors try random node.
@@ -307,11 +306,58 @@ func (c *ClusterClient) Process(cmd Cmder) {
             }
 
             node, err = c.nodeByAddr(addr)
+            if err != nil {
+                cmd.setErr(err)
+                return err
+            }
             continue
         }
 
         break
     }
+
+    return cmd.Err()
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
+    c.mu.RLock()
+    slots := c.slots
+    c.mu.RUnlock()
+
+    var retErr error
+    var mu sync.Mutex
+
+    var wg sync.WaitGroup
+    visited := make(map[*clusterNode]struct{})
+    for _, nodes := range slots {
+        if len(nodes) == 0 {
+            continue
+        }
+
+        master := nodes[0]
+        if _, ok := visited[master]; ok {
+            continue
+        }
+        visited[master] = struct{}{}
+
+        wg.Add(1)
+        go func(node *clusterNode) {
+            err := fn(node.Client)
+            if err != nil {
+                mu.Lock()
+                if retErr == nil {
+                    retErr = err
+                }
+                mu.Unlock()
+            }
+            wg.Done()
+        }(master)
+    }
+    wg.Wait()
+
+    return retErr
 }
 
 // closeClients closes all clients and returns the first error if there are any.
@@ -327,9 +373,6 @@ func (c *ClusterClient) closeClients() error {
 
 func (c *ClusterClient) setSlots(cs []ClusterSlot) {
     slots := make([][]*clusterNode, hashtag.SlotNumber)
-    for i := 0; i < hashtag.SlotNumber; i++ {
-        slots[i] = nil
-    }
     for _, s := range cs {
         var nodes []*clusterNode
         for _, n := range s.Nodes {
@@ -351,17 +394,11 @@ func (c *ClusterClient) setSlots(cs []ClusterSlot) {
     c.mu.Unlock()
 }
 
-func (c *ClusterClient) setNodesLatency() {
-    nodes := c.getNodes()
-    for _, node := range nodes {
-        var latency int
-        for i := 0; i < 10; i++ {
-            t1 := time.Now()
-            node.Client.Ping()
-            latency += int(time.Since(t1) / time.Millisecond)
-        }
-        node.Latency = latency
+func (c *ClusterClient) lazyReloadSlots() {
+    if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+        return
     }
+    go c.reloadSlots()
 }
 
 func (c *ClusterClient) reloadSlots() {
@@ -384,11 +421,17 @@ func (c *ClusterClient) reloadSlots() {
     }
 }
 
-func (c *ClusterClient) lazyReloadSlots() {
-    if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
-        return
+func (c *ClusterClient) setNodesLatency() {
+    const n = 10
+    for _, node := range c.getNodes() {
+        var latency time.Duration
+        for i := 0; i < n; i++ {
+            t1 := time.Now()
+            node.Client.Ping()
+            latency += time.Since(t1)
+        }
+        node.Latency = latency / n
     }
-    go c.reloadSlots()
 }
 
 // reaper closes idle connections to the cluster.
@@ -435,7 +478,7 @@ func (c *ClusterClient) Pipelined(fn func(*Pipeline) error) ([]Cmder, error) {
 
 func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
     var retErr error
-    returnError := func(err error) {
+    setRetErr := func(err error) {
         if retErr == nil {
             retErr = err
         }
@@ -446,7 +489,7 @@ func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
         _, node, err := c.cmdSlotAndNode(cmd)
         if err != nil {
             cmd.setErr(err)
-            returnError(err)
+            setRetErr(err)
             continue
         }
         cmdsMap[node] = append(cmdsMap[node], cmd)
@@ -461,21 +504,21 @@ func (c *ClusterClient) pipelineExec(cmds []Cmder) error {
             node, err = c.randomNode()
             if err != nil {
                 setCmdsErr(cmds, err)
-                returnError(err)
+                setRetErr(err)
                 continue
             }
         }
 
         cn, err := node.Client.conn()
         if err != nil {
-            setCmdsErr(cmds, retErr)
-            returnError(err)
+            setCmdsErr(cmds, err)
+            setRetErr(err)
             continue
         }
 
         failedCmds, err = c.execClusterCmds(cn, cmds, failedCmds)
         if err != nil {
-            returnError(err)
+            setRetErr(err)
         }
         node.Client.putConn(cn, err, false)
     }
@@ -495,7 +538,7 @@ func (c *ClusterClient) execClusterCmds(
     }
 
     var retErr error
-    returnError := func(err error) {
+    setRetErr := func(err error) {
         if retErr == nil {
             retErr = err
         }
@@ -515,7 +558,7 @@ func (c *ClusterClient) execClusterCmds(
             cmd.reset()
             node, err := c.nodeByAddr(addr)
             if err != nil {
-                returnError(err)
+                setRetErr(err)
                 continue
             }
             failedCmds[node] = append(failedCmds[node], cmd)
@@ -523,12 +566,12 @@ func (c *ClusterClient) execClusterCmds(
             cmd.reset()
             node, err := c.nodeByAddr(addr)
             if err != nil {
-                returnError(err)
+                setRetErr(err)
                 continue
             }
             failedCmds[node] = append(failedCmds[node], NewCmd("ASKING"), cmd)
         } else {
-            returnError(err)
+            setRetErr(err)
         }
     }
 
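The cluster.go changes above also make Process return the command's error, and the same signature change is applied to baseClient, Ring, Tx, and Pipeline in the files below. A caller-side sketch, assuming the v4 import path and an illustrative address and key; the error is still set on the command as before:

package main

import (
    "fmt"

    "gopkg.in/redis.v4" // v4 import path assumed
)

func main() {
    client := redis.NewClusterClient(&redis.ClusterOptions{
        Addrs: []string{":7000"}, // illustrative address
    })
    defer client.Close()

    cmd := redis.NewStringCmd("GET", "key")

    // Before this commit Process returned nothing, so callers read the error
    // back from the command:
    //   client.Process(cmd)
    //   err := cmd.Err()
    //
    // After this commit the same error is also returned directly:
    if err := client.Process(cmd); err != nil {
        fmt.Println("GET failed:", err)
        return
    }
    fmt.Println("GET:", cmd.Val())
}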
cluster_test.go

@@ -336,11 +336,11 @@ var _ = Describe("ClusterClient", func() {
         Expect(cnt).To(Equal(int64(1)))
     })
 
-    It("should return pool stats", func() {
+    It("returns pool stats", func() {
         Expect(client.PoolStats()).To(BeAssignableToTypeOf(&redis.PoolStats{}))
     })
 
-    It("should follow redirects", func() {
+    It("follows redirects", func() {
         Expect(client.Set("A", "VALUE", 0).Err()).NotTo(HaveOccurred())
 
         slot := hashtag.Slot("A")
@@ -351,7 +351,7 @@ var _ = Describe("ClusterClient", func() {
         Expect(val).To(Equal("VALUE"))
     })
 
-    It("should return error when there are no attempts left", func() {
+    It("returns an error when there are no attempts left", func() {
         client := cluster.clusterClient(&redis.ClusterOptions{
             MaxRedirects: -1,
         })
@@ -366,7 +366,7 @@ var _ = Describe("ClusterClient", func() {
         Expect(client.Close()).NotTo(HaveOccurred())
     })
 
-    It("should Watch", func() {
+    It("supports Watch", func() {
         var incr func(string) error
 
         // Transactionally increments key using GET and SET commands.
@@ -461,17 +461,35 @@ var _ = Describe("ClusterClient", func() {
         Expect(c.Err()).NotTo(HaveOccurred())
         Expect(c.Val()).To(Equal("C_value"))
     })
 
+    It("calls fn for every master node", func() {
+        for i := 0; i < 10; i++ {
+            Expect(client.Set(strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred())
+        }
+
+        err := client.ForEachMaster(func(master *redis.Client) error {
+            return master.FlushDb().Err()
+        })
+        Expect(err).NotTo(HaveOccurred())
+
+        for _, client := range cluster.masters() {
+            keys, err := client.Keys("*").Result()
+            Expect(err).NotTo(HaveOccurred())
+            Expect(keys).To(HaveLen(0))
+        }
+    })
 }
 
 Describe("default ClusterClient", func() {
     BeforeEach(func() {
         client = cluster.clusterClient(nil)
+
+        _ = client.ForEachMaster(func(master *redis.Client) error {
+            return master.FlushDb().Err()
+        })
     })
 
     AfterEach(func() {
-        for _, client := range cluster.masters() {
-            Expect(client.FlushDb().Err()).NotTo(HaveOccurred())
-        }
         Expect(client.Close()).NotTo(HaveOccurred())
     })
@@ -483,12 +501,14 @@ var _ = Describe("ClusterClient", func() {
         client = cluster.clusterClient(&redis.ClusterOptions{
             RouteByLatency: true,
         })
+
+        _ = client.ForEachMaster(func(master *redis.Client) error {
+            return master.FlushDb().Err()
+        })
     })
 
     AfterEach(func() {
-        for _, client := range cluster.masters() {
-            Expect(client.FlushDb().Err()).NotTo(HaveOccurred())
-        }
+        client.FlushDb()
         Expect(client.Close()).NotTo(HaveOccurred())
     })
 
command.go

@@ -31,6 +31,7 @@ var (
 type Cmder interface {
     args() []interface{}
     arg(int) string
+
     readReply(*pool.Conn) error
     setErr(error)
     reset()
@@ -142,7 +143,9 @@ type Cmd struct {
 }
 
 func NewCmd(args ...interface{}) *Cmd {
-    return &Cmd{baseCmd: newBaseCmd(args)}
+    return &Cmd{
+        baseCmd: newBaseCmd(args),
+    }
 }
 
 func (cmd *Cmd) reset() {
commands.go

@@ -52,11 +52,11 @@ func formatSec(dur time.Duration) string {
 }
 
 type cmdable struct {
-    process func(cmd Cmder)
+    process func(cmd Cmder) error
 }
 
 type statefulCmdable struct {
-    process func(cmd Cmder)
+    process func(cmd Cmder) error
 }
 
 //------------------------------------------------------------------------------
pipeline.go

@@ -22,10 +22,11 @@ type Pipeline struct {
     closed int32
 }
 
-func (pipe *Pipeline) Process(cmd Cmder) {
+func (pipe *Pipeline) Process(cmd Cmder) error {
     pipe.mu.Lock()
     pipe.cmds = append(pipe.cmds, cmd)
     pipe.mu.Unlock()
+    return nil
 }
 
 // Close closes the pipeline, releasing any open resources.
redis.go (10 changed lines)

@@ -74,7 +74,7 @@ func (c *baseClient) initConn(cn *pool.Conn) error {
     return err
 }
 
-func (c *baseClient) Process(cmd Cmder) {
+func (c *baseClient) Process(cmd Cmder) error {
     for i := 0; i <= c.opt.MaxRetries; i++ {
         if i > 0 {
             cmd.reset()
@@ -83,7 +83,7 @@ func (c *baseClient) Process(cmd Cmder) {
         cn, err := c.conn()
         if err != nil {
             cmd.setErr(err)
-            return
+            return err
         }
 
         readTimeout := cmd.readTimeout()
@@ -100,7 +100,7 @@ func (c *baseClient) Process(cmd Cmder) {
             if err != nil && shouldRetry(err) {
                 continue
             }
-            return
+            return err
         }
 
         err = cmd.readReply(cn)
@@ -109,8 +109,10 @@ func (c *baseClient) Process(cmd Cmder) {
             continue
         }
 
-        return
+        return err
     }
+
+    return cmd.Err()
 }
 
 func (c *baseClient) closed() bool {
ring.go (6 changed lines)

@@ -199,13 +199,13 @@ func (ring *Ring) getClient(key string) (*Client, error) {
     return cl, nil
 }
 
-func (ring *Ring) Process(cmd Cmder) {
+func (ring *Ring) Process(cmd Cmder) error {
     cl, err := ring.getClient(ring.cmdFirstKey(cmd))
     if err != nil {
         cmd.setErr(err)
-        return
+        return err
     }
-    cl.baseClient.Process(cmd)
+    return cl.baseClient.Process(cmd)
 }
 
 // rebalance removes dead shards from the ring.
tx.go (8 changed lines)

@@ -50,12 +50,12 @@ func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
     return retErr
 }
 
-func (tx *Tx) Process(cmd Cmder) {
+func (tx *Tx) Process(cmd Cmder) error {
     if tx.cmds == nil {
-        tx.baseClient.Process(cmd)
-    } else {
-        tx.cmds = append(tx.cmds, cmd)
+        return tx.baseClient.Process(cmd)
     }
+    tx.cmds = append(tx.cmds, cmd)
+    return nil
 }
 
 // close closes the transaction, releasing any open resources.
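ForEachMaster and the renamed setRetErr helpers in cluster.go both use the same "first error wins" aggregation over concurrent or repeated work. A standalone sketch of that pattern, using only the standard library; it is not part of the go-redis API:

package main

import (
    "errors"
    "fmt"
    "sync"
)

// firstError fans work out to one goroutine per item and keeps only the first
// error reported, mirroring the retErr/mu/wg pattern in ForEachMaster above.
func firstError(items []int, fn func(int) error) error {
    var (
        mu     sync.Mutex
        retErr error
        wg     sync.WaitGroup
    )
    for _, item := range items {
        wg.Add(1)
        go func(item int) {
            defer wg.Done()
            if err := fn(item); err != nil {
                mu.Lock()
                if retErr == nil {
                    retErr = err
                }
                mu.Unlock()
            }
        }(item)
    }
    wg.Wait()
    return retErr
}

func main() {
    err := firstError([]int{1, 2, 3}, func(i int) error {
        if i == 2 {
            return errors.New("item 2 failed")
        }
        return nil
    })
    fmt.Println(err)
}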