Merge pull request #1104 from go-redis/fix/golangci-lint

Add golangci-lint
Vladimir Mihailenco 2019-07-25 15:01:16 +03:00 committed by GitHub
commit ec312e4dd5
18 changed files with 284 additions and 274 deletions

.golangci.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
+run:
+  concurrency: 8
+  deadline: 5m
+  tests: false
+linters:
+  enable-all: true
+  disable:
+    - gochecknoglobals
+    - goconst
+    - gosec
+    - maligned
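golangci-lint picks up .golangci.yml from the repository root automatically, so a bare golangci-lint run behaves the same locally as in CI. deadline caps total run time, tests: false skips _test.go files, and enable-all: true with an explicit disable list keeps the configuration self-documenting: every opted-out linter is named.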


@@ -18,3 +18,6 @@ env:
   - GO111MODULE=on

 go_import_path: github.com/go-redis/redis
+
+before_install:
+  - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1


@@ -6,6 +6,7 @@ all: testdeps
 	go vet
 	go get github.com/gordonklaus/ineffassign
 	ineffassign .
+	golangci-lint run

 testdeps: testdata/redis/src/redis-server


@@ -237,7 +237,7 @@ func BenchmarkRingWithContext(b *testing.B) {
 func newClusterScenario() *clusterScenario {
 	return &clusterScenario{
 		ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
-		nodeIds:   make([]string, 6),
+		nodeIDs:   make([]string, 6),
 		processes: make(map[string]*redisProcess, 6),
 		clients:   make(map[string]*redis.Client, 6),
 	}
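This rename, and the masterID/ID changes in the hunks that follow, all trace back to golint's initialism rule: common initialisms such as ID keep a uniform case in Go identifiers, so nodeIds becomes nodeIDs.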


@@ -307,6 +307,7 @@ func (c *clusterNodes) NextGeneration() uint32 {
 // GC removes unused nodes.
 func (c *clusterNodes) GC(generation uint32) {
+	//nolint:prealloc
 	var collected []*clusterNode
 	c.mu.Lock()
 	for addr, node := range c.allNodes {
@@ -651,8 +652,8 @@ type clusterClient struct {
 	opt           *ClusterOptions
 	nodes         *clusterNodes
-	state         *clusterStateHolder
-	cmdsInfoCache *cmdsInfoCache
+	state         *clusterStateHolder //nolint:structcheck
+	cmdsInfoCache *cmdsInfoCache      //nolint:structcheck
 }

 // ClusterClient is a Redis Cluster client representing a pool of zero
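A //nolint:<name> comment suppresses exactly one linter at that location. Here prealloc would otherwise suggest pre-sizing collected (something like make([]*clusterNode, 0, len(c.allNodes)), a hypothetical alternative the author declined), and the structcheck annotations presumably silence false "unused field" reports, which structcheck is known to produce for structs reached through embedding.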


@@ -18,7 +18,7 @@ import (
 type clusterScenario struct {
 	ports     []string
-	nodeIds   []string
+	nodeIDs   []string
 	processes map[string]*redisProcess
 	clients   map[string]*redis.Client
 }
@@ -98,7 +98,7 @@ func startCluster(scenario *clusterScenario) error {
 		scenario.processes[port] = process
 		scenario.clients[port] = client
-		scenario.nodeIds[pos] = info[:40]
+		scenario.nodeIDs[pos] = info[:40]
 	}

 	// Meet cluster nodes.
@@ -120,12 +120,12 @@ func startCluster(scenario *clusterScenario) error {
 	// Bootstrap slaves.
 	for idx, slave := range scenario.slaves() {
-		masterId := scenario.nodeIds[idx]
+		masterID := scenario.nodeIDs[idx]

 		// Wait until master is available
 		err := eventually(func() error {
 			s := slave.ClusterNodes().Val()
-			wanted := masterId
+			wanted := masterID
 			if !strings.Contains(s, wanted) {
 				return fmt.Errorf("%q does not contain %q", s, wanted)
 			}
@@ -135,7 +135,7 @@ func startCluster(scenario *clusterScenario) error {
 			return err
 		}

-		err = slave.ClusterReplicate(masterId).Err()
+		err = slave.ClusterReplicate(masterID).Err()
 		if err != nil {
 			return err
 		}
@@ -146,30 +146,30 @@ func startCluster(scenario *clusterScenario) error {
 		Start: 0,
 		End:   4999,
 		Nodes: []redis.ClusterNode{{
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8220",
 		}, {
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8223",
 		}},
 	}, {
 		Start: 5000,
 		End:   9999,
 		Nodes: []redis.ClusterNode{{
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8221",
 		}, {
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8224",
 		}},
 	}, {
 		Start: 10000,
 		End:   16383,
 		Nodes: []redis.ClusterNode{{
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8222",
 		}, {
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8225",
 		}},
 	}}
@@ -592,30 +592,30 @@ var _ = Describe("ClusterClient", func() {
 		Start: 0,
 		End:   4999,
 		Nodes: []redis.ClusterNode{{
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8220",
 		}, {
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8223",
 		}},
 	}, {
 		Start: 5000,
 		End:   9999,
 		Nodes: []redis.ClusterNode{{
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8221",
 		}, {
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8224",
 		}},
 	}, {
 		Start: 10000,
 		End:   16383,
 		Nodes: []redis.ClusterNode{{
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8222",
 		}, {
-			Id:   "",
+			ID:   "",
 			Addr: "127.0.0.1:8225",
 		}},
 	}}
@@ -647,7 +647,7 @@ var _ = Describe("ClusterClient", func() {
 	})

 	It("should CLUSTER COUNT-FAILURE-REPORTS", func() {
-		n, err := client.ClusterCountFailureReports(cluster.nodeIds[0]).Result()
+		n, err := client.ClusterCountFailureReports(cluster.nodeIDs[0]).Result()
 		Expect(err).NotTo(HaveOccurred())
 		Expect(n).To(Equal(int64(0)))
 	})
@@ -665,7 +665,7 @@ var _ = Describe("ClusterClient", func() {
 	})

 	It("should CLUSTER SLAVES", func() {
-		nodesList, err := client.ClusterSlaves(cluster.nodeIds[0]).Result()
+		nodesList, err := client.ClusterSlaves(cluster.nodeIDs[0]).Result()
 		Expect(err).NotTo(HaveOccurred())
 		Expect(nodesList).Should(ContainElement(ContainSubstring("slave")))
 		Expect(nodesList).Should(HaveLen(1))


@@ -52,7 +52,7 @@ func writeCmd(wr *proto.Writer, cmds ...Cmder) error {
 }

 func cmdString(cmd Cmder, val interface{}) string {
-	var ss []string
+	ss := make([]string, 0, len(cmd.Args()))
 	for _, arg := range cmd.Args() {
 		ss = append(ss, fmt.Sprint(arg))
 	}
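The var ss []string to make([]string, 0, len(cmd.Args())) change is what the prealloc linter asks for: when the element count is known before the loop, one sized allocation replaces the repeated growth that bare append would do. A minimal runnable sketch of the pattern, with a hypothetical args slice standing in for cmd.Args():

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical argument list standing in for cmd.Args().
	args := []interface{}{"set", "key", "value"}

	// prealloc's point: the final length is known up front, so allocate
	// the slice once instead of letting append grow it in stages.
	ss := make([]string, 0, len(args))
	for _, arg := range args {
		ss = append(ss, fmt.Sprint(arg))
	}
	fmt.Println(strings.Join(ss, " ")) // set key value
}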
@@ -996,6 +996,7 @@ func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
 func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
 	msgs := make([]XMessage, n)
 	for i := 0; i < len(msgs); i++ {
+		i := i
 		_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 			id, err := rd.ReadString()
 			if err != nil {
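The added i := i redeclares the loop variable inside each iteration. Before Go 1.22 all iterations shared a single i, so a closure that outlived its iteration observed the final value; scopelint flags any closure over a loop variable because it cannot prove the closure runs only synchronously (as it does here, inside ReadArrayReply). A minimal sketch of the failure mode the linter guards against:

package main

import "fmt"

func main() {
	var funcs []func()
	for i := 0; i < 3; i++ {
		i := i // shadow: each closure now captures its own copy
		funcs = append(funcs, func() { fmt.Println(i) })
	}
	for _, f := range funcs {
		f() // prints 0 1 2; without the shadowing, pre-Go 1.22, prints 3 3 3
	}
}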
@@ -1076,6 +1077,7 @@ func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
 	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 		cmd.val = make([]XStream, n)
 		for i := 0; i < len(cmd.val); i++ {
+			i := i
 			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 				if n != 2 {
 					return nil, fmt.Errorf("got %d, wanted 2", n)
@@ -1441,7 +1443,7 @@ func (cmd *ScanCmd) Iterator() *ScanIterator {
 //------------------------------------------------------------------------------

 type ClusterNode struct {
-	Id   string
+	ID   string
 	Addr string
 }
@@ -1528,7 +1530,7 @@ func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
 			if err != nil {
 				return nil, err
 			}
-			nodes[j].Id = id
+			nodes[j].ID = id
 		}
 	}
@@ -1737,6 +1739,7 @@ func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
 	_, cmd.err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 		cmd.val = make([]*GeoPos, n)
 		for i := 0; i < len(cmd.val); i++ {
+			i := i
 			_, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 				longitude, err := rd.ReadFloatReply()
 				if err != nil {

(File diff suppressed because it is too large.)


@@ -64,11 +64,12 @@ func IsMovedError(err error) (moved bool, ask bool, addr string) {
 	}

 	s := err.Error()
-	if strings.HasPrefix(s, "MOVED ") {
+	switch {
+	case strings.HasPrefix(s, "MOVED "):
 		moved = true
-	} else if strings.HasPrefix(s, "ASK ") {
+	case strings.HasPrefix(s, "ASK "):
 		ask = true
-	} else {
+	default:
 		return
 	}
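Replacing the if/else-if chain with a tagless switch is a purely stylistic rewrite (gocritic's ifElseChain check, for example, prefers this form); behavior is identical. A standalone sketch of the same classification, using raw strings shaped like the MOVED/ASK redirect errors Redis Cluster returns:

package main

import (
	"fmt"
	"strings"
)

// classify mirrors the switch above, operating on a plain string.
func classify(s string) (moved, ask bool) {
	switch {
	case strings.HasPrefix(s, "MOVED "):
		moved = true
	case strings.HasPrefix(s, "ASK "):
		ask = true
	}
	return moved, ask
}

func main() {
	fmt.Println(classify("MOVED 3999 127.0.0.1:8221")) // true false
	fmt.Println(classify("ASK 3999 127.0.0.1:8221"))   // false true
}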


@@ -447,7 +447,7 @@ func (p *ConnPool) ReapStaleConns() (int, error) {
 		p.freeTurn()

 		if cn != nil {
-			p.closeConn(cn)
+			_ = p.closeConn(cn)
 			n++
 		} else {
 			break
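The _ = assignment here (and in Pipeline.Close and NewScript below) is errcheck appeasement: assigning the return value to the blank identifier documents that the error is deliberately ignored rather than silently dropped.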


@@ -90,9 +90,8 @@ func (w *Writer) writeArg(v interface{}) error {
 	case bool:
 		if v {
 			return w.int(1)
-		} else {
-			return w.int(0)
 		}
+		return w.int(0)
 	case time.Time:
 		return w.string(v.Format(time.RFC3339))
 	case encoding.BinaryMarshaler:
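This change, like the TLS dialer hunk below, follows golint's redundant-else rule: when the if branch ends in a return, the else can be dropped and its body outdented, reducing nesting without changing behavior.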


@@ -45,7 +45,7 @@ var (
 var cluster = &clusterScenario{
 	ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
-	nodeIds:   make([]string, 6),
+	nodeIDs:   make([]string, 6),
 	processes: make(map[string]*redisProcess, 6),
 	clients:   make(map[string]*redis.Client, 6),
 }


@@ -113,9 +113,8 @@ func (opt *Options) init() {
 			}

 			if opt.TLSConfig == nil {
 				return netDialer.Dial(network, addr)
-			} else {
-				return tls.DialWithDialer(netDialer, opt.Network, opt.Addr, opt.TLSConfig)
 			}
+			return tls.DialWithDialer(netDialer, opt.Network, opt.Addr, opt.TLSConfig)
 		}
 	}
 	if opt.PoolSize == 0 {


@@ -71,7 +71,7 @@ func (c *Pipeline) Process(cmd Cmder) error {
 // Close closes the pipeline, releasing any open resources.
 func (c *Pipeline) Close() error {
 	c.mu.Lock()
-	c.discard()
+	_ = c.discard()
 	c.closed = true
 	c.mu.Unlock()
 	return nil


@@ -97,10 +97,7 @@ func (c *PubSub) resubscribe(cn *pool.Conn) error {
 	var firstErr error

 	if len(c.channels) > 0 {
-		err := c._subscribe(cn, "subscribe", mapKeys(c.channels))
-		if err != nil && firstErr == nil {
-			firstErr = err
-		}
+		firstErr = c._subscribe(cn, "subscribe", mapKeys(c.channels))
 	}

 	if len(c.patterns) > 0 {
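At this first use, firstErr is necessarily still nil, so the err != nil && firstErr == nil guard was dead code; assigning the result directly is equivalent. The closeSentinel hunk below applies the same simplification.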


@@ -339,7 +339,7 @@ type ring struct {
 	hooks

 	opt           *RingOptions
 	shards        *ringShards
-	cmdsInfoCache *cmdsInfoCache
+	cmdsInfoCache *cmdsInfoCache //nolint:structcheck
 }

 // Ring is a Redis client that uses consistent hashing to distribute


@@ -24,7 +24,7 @@ type Script struct {
 func NewScript(src string) *Script {
 	h := sha1.New()
-	io.WriteString(h, src)
+	_, _ = io.WriteString(h, src)
 	return &Script{
 		src:  src,
 		hash: hex.EncodeToString(h.Sum(nil)),


@@ -432,15 +432,10 @@ func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
 }

 func (c *sentinelFailover) closeSentinel() error {
-	var firstErr error
-
-	err := c.pubsub.Close()
-	if err != nil && firstErr == nil {
-		firstErr = err
-	}
+	firstErr := c.pubsub.Close()
 	c.pubsub = nil

-	err = c.sentinel.Close()
+	err := c.sentinel.Close()
 	if err != nil && firstErr == nil {
 		firstErr = err
 	}
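closeSentinel collapses to the usual first-error-wins shape: the first Close seeds firstErr directly (its firstErr == nil guard could never be false), and later errors are kept only if nothing failed earlier. A minimal sketch, with hypothetical closers standing in for c.pubsub and c.sentinel:

package main

import (
	"errors"
	"fmt"
)

// closeBoth returns the first non-nil error from two close functions.
func closeBoth(a, b func() error) error {
	firstErr := a() // first result seeds firstErr directly; no guard needed
	if err := b(); err != nil && firstErr == nil {
		firstErr = err
	}
	return firstErr
}

func main() {
	ok := func() error { return nil }
	boom := func() error { return errors.New("boom") }
	fmt.Println(closeBoth(ok, boom)) // boom
	fmt.Println(closeBoth(boom, ok)) // boom
	fmt.Println(closeBoth(ok, ok))   // <nil>
}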