refactor: remove unused context attributes (#2154)

* refactor: remove unused context field
Author: Knut Zuidema, 2022-07-14 12:43:42 +02:00 (committed by GitHub)
Commit: e061db8c13 (parent: 092a692384)
5 changed files with 16 additions and 25 deletions
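The pattern throughout is the same: drop the context.Context that each client struct captured at construction time and thread the caller's context through as an explicit parameter, so per-call deadlines and cancellation are honored. A minimal before/after sketch of that refactor (illustrative names, not code from this repository):

package example

import "context"

// Before: the context is captured once at construction, so every later
// call runs against the same, possibly stale, context.
type oldClient struct {
	ctx context.Context
}

func (c *oldClient) do(name string) error {
	_ = c.ctx // per-call deadlines and cancellation are ignored
	return nil
}

// After: each method takes the caller's context as a parameter, which is
// the shape this commit gives cmdInfo, cmdSlot, and the other helpers.
type newClient struct{}

func (c *newClient) do(ctx context.Context, name string) error {
	_ = ctx // the caller's deadline and cancellation apply here
	return nil
}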


@@ -708,7 +708,6 @@ type ClusterClient struct {
 	*clusterClient
 	cmdable
 	hooks
-	ctx context.Context
 }

 // NewClusterClient returns a Redis Cluster client as described in
@@ -721,7 +720,6 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 			opt: opt,
 			nodes: newClusterNodes(opt),
 		},
-		ctx: context.Background(),
 	}
 	c.state = newClusterStateHolder(c.loadState)
 	c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
@@ -765,8 +763,8 @@ func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
 }

 func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
-	cmdInfo := c.cmdInfo(cmd.Name())
-	slot := c.cmdSlot(cmd)
+	cmdInfo := c.cmdInfo(ctx, cmd.Name())
+	slot := c.cmdSlot(ctx, cmd)

 	var node *clusterNode
 	var ask bool
@@ -1141,9 +1139,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
 		return err
 	}

-	if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+	if c.opt.ReadOnly && c.cmdsAreReadOnly(ctx, cmds) {
 		for _, cmd := range cmds {
-			slot := c.cmdSlot(cmd)
+			slot := c.cmdSlot(ctx, cmd)
 			node, err := c.slotReadOnlyNode(state, slot)
 			if err != nil {
 				return err
@@ -1154,7 +1152,7 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
 	}

 	for _, cmd := range cmds {
-		slot := c.cmdSlot(cmd)
+		slot := c.cmdSlot(ctx, cmd)
 		node, err := state.slotMasterNode(slot)
 		if err != nil {
 			return err
@@ -1164,9 +1162,9 @@ func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmd
 	return nil
 }

-func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+func (c *ClusterClient) cmdsAreReadOnly(ctx context.Context, cmds []Cmder) bool {
 	for _, cmd := range cmds {
-		cmdInfo := c.cmdInfo(cmd.Name())
+		cmdInfo := c.cmdInfo(ctx, cmd.Name())
 		if cmdInfo == nil || !cmdInfo.ReadOnly {
 			return false
 		}
@@ -1278,7 +1276,7 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
 		return err
 	}

-	cmdsMap := c.mapCmdsBySlot(cmds)
+	cmdsMap := c.mapCmdsBySlot(ctx, cmds)
 	for slot, cmds := range cmdsMap {
 		node, err := state.slotMasterNode(slot)
 		if err != nil {
@@ -1329,10 +1327,10 @@ func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) er
 	return cmdsFirstErr(cmds)
 }

-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+func (c *ClusterClient) mapCmdsBySlot(ctx context.Context, cmds []Cmder) map[int][]Cmder {
 	cmdsMap := make(map[int][]Cmder)
 	for _, cmd := range cmds {
-		slot := c.cmdSlot(cmd)
+		slot := c.cmdSlot(ctx, cmd)
 		cmdsMap[slot] = append(cmdsMap[slot], cmd)
 	}
 	return cmdsMap
@@ -1602,8 +1600,8 @@ func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo,
 	return nil, firstErr
 }

-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
-	cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+func (c *ClusterClient) cmdInfo(ctx context.Context, name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
 	if err != nil {
 		internal.Logger.Printf(context.TODO(), "getting command info: %s", err)
 		return nil
@@ -1616,13 +1614,13 @@ func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
 	return info
 }

-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+func (c *ClusterClient) cmdSlot(ctx context.Context, cmd Cmder) int {
 	args := cmd.Args()
 	if args[0] == "cluster" && args[1] == "getkeysinslot" {
 		return args[2].(int)
 	}

-	cmdInfo := c.cmdInfo(cmd.Name())
+	cmdInfo := c.cmdInfo(ctx, cmd.Name())
 	return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
 }


@@ -546,7 +546,6 @@ type Client struct {
 	*baseClient
 	cmdable
 	hooks
-	ctx context.Context
 }

 // NewClient returns a client to the Redis Server specified by Options.
@@ -555,7 +554,6 @@ func NewClient(opt *Options) *Client {
 	c := Client{
 		baseClient: newBaseClient(opt, newConnPool(opt)),
-		ctx: context.Background(),
 	}
 	c.cmdable = c.Process


@@ -410,7 +410,6 @@ type Ring struct {
 	*ring
 	cmdable
 	hooks
-	ctx context.Context
 }

 func NewRing(opt *RingOptions) *Ring {
@@ -421,7 +420,6 @@ func NewRing(opt *RingOptions) *Ring {
 			opt: opt,
 			shards: newRingShards(opt),
 		},
-		ctx: context.Background(),
 	}

 	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)


@@ -209,7 +209,6 @@ func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
 	c := Client{
 		baseClient: newBaseClient(opt, connPool),
-		ctx: context.Background(),
 	}
 	c.cmdable = c.Process
 	c.onClose = failover.Close

tx.go

@@ -20,17 +20,15 @@ type Tx struct {
 	cmdable
 	statefulCmdable
 	hooks
-	ctx context.Context
 }

-func (c *Client) newTx(ctx context.Context) *Tx {
+func (c *Client) newTx() *Tx {
 	tx := Tx{
 		baseClient: baseClient{
 			opt: c.opt,
 			connPool: pool.NewStickyConnPool(c.connPool),
 		},
 		hooks: c.hooks.clone(),
-		ctx: ctx,
 	}
 	tx.init()
 	return &tx
@@ -50,7 +48,7 @@ func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
 //
 // The transaction is automatically closed when fn exits.
 func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
-	tx := c.newTx(ctx)
+	tx := c.newTx()
 	defer tx.Close(ctx)
 	if len(keys) > 0 {
 		if err := tx.Watch(ctx, keys...).Err(); err != nil {
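For callers nothing changes: Watch still takes the context, and that same ctx now flows into the transaction's commands rather than being pinned on the Tx at creation. A hedged usage sketch, assuming the go-redis v8 import path and a pre-built *redis.Client:

package example

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// incr optimistically increments key under WATCH; the ctx that Watch
// receives is passed to every command, so cancellation propagates.
func incr(ctx context.Context, client *redis.Client, key string) error {
	return client.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, n+1, 0)
			return nil
		})
		return err
	}, key)
}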