Fix build

Vladimir Mihailenco 2020-09-09 17:39:13 +03:00
parent 4877841b11
commit 457cdea58f
5 changed files with 79 additions and 43 deletions

View File

@@ -259,7 +259,7 @@ func BenchmarkClusterPing(b *testing.B) {
 	if err := startCluster(ctx, cluster); err != nil {
 		b.Fatal(err)
 	}
-	defer stopCluster(cluster)
+	defer cluster.Close()
 
 	client := cluster.newClusterClient(ctx, redisClusterOptions())
 	defer client.Close()
@@ -286,7 +286,7 @@ func BenchmarkClusterSetString(b *testing.B) {
 	if err := startCluster(ctx, cluster); err != nil {
 		b.Fatal(err)
 	}
-	defer stopCluster(cluster)
+	defer cluster.Close()
 
 	client := cluster.newClusterClient(ctx, redisClusterOptions())
 	defer client.Close()
@@ -315,7 +315,7 @@ func BenchmarkClusterReloadState(b *testing.B) {
 	if err := startCluster(ctx, cluster); err != nil {
 		b.Fatal(err)
 	}
-	defer stopCluster(cluster)
+	defer cluster.Close()
 
 	client := cluster.newClusterClient(ctx, redisClusterOptions())
 	defer client.Close()

View File

@@ -80,6 +80,14 @@ func (s *clusterScenario) newClusterClient(
 	return client
 }
 
+func (s *clusterScenario) Close() error {
+	for _, port := range s.ports {
+		processes[port].Close()
+		delete(processes, port)
+	}
+	return nil
+}
+
 func startCluster(ctx context.Context, scenario *clusterScenario) error {
 	// Start processes and collect node ids
 	for pos, port := range scenario.ports {
@@ -221,20 +229,6 @@ func slotEqual(s1, s2 redis.ClusterSlot) bool {
 	return true
 }
 
-func stopCluster(scenario *clusterScenario) error {
-	for _, client := range scenario.clients {
-		if err := client.Close(); err != nil {
-			return err
-		}
-	}
-	for _, process := range scenario.processes {
-		if err := process.Close(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 //------------------------------------------------------------------------------
 
 var _ = Describe("ClusterClient", func() {

View File

@@ -4029,7 +4029,7 @@ var _ = Describe("Commands", func() {
 			result, err := client.SlowLog(ctx, -1).Result()
 			Expect(err).NotTo(HaveOccurred())
-			Expect(len(result)).To(Equal(2))
+			Expect(len(result)).NotTo(BeZero())
 		})
 	})
 })

View File

@@ -41,12 +41,14 @@ const (
 )
 
 var (
+	sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
+
+	processes map[string]*redisProcess
+
 	redisMain                                      *redisProcess
 	ringShard1, ringShard2, ringShard3             *redisProcess
 	sentinelMaster, sentinelSlave1, sentinelSlave2 *redisProcess
 	sentinel1, sentinel2, sentinel3                *redisProcess
-
-	sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
 )
 
 var cluster = &clusterScenario{
@@ -56,6 +58,13 @@ var cluster = &clusterScenario{
 	clients: make(map[string]*redis.Client, 6),
 }
 
+func registerProcess(port string, p *redisProcess) {
+	if processes == nil {
+		processes = make(map[string]*redisProcess)
+	}
+	processes[port] = p
+}
+
 var _ = BeforeSuite(func() {
 	var err error
@@ -95,20 +104,12 @@ var _ = BeforeSuite(func() {
 })
 
 var _ = AfterSuite(func() {
-	Expect(redisMain.Close()).NotTo(HaveOccurred())
-
-	Expect(ringShard1.Close()).NotTo(HaveOccurred())
-	Expect(ringShard2.Close()).NotTo(HaveOccurred())
-	Expect(ringShard3.Close()).NotTo(HaveOccurred())
-
-	Expect(sentinel1.Close()).NotTo(HaveOccurred())
-	Expect(sentinel2.Close()).NotTo(HaveOccurred())
-	Expect(sentinel3.Close()).NotTo(HaveOccurred())
-	Expect(sentinelSlave1.Close()).NotTo(HaveOccurred())
-	Expect(sentinelSlave2.Close()).NotTo(HaveOccurred())
-	Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
-
-	Expect(stopCluster(cluster)).NotTo(HaveOccurred())
+	Expect(cluster.Close()).NotTo(HaveOccurred())
+
+	for _, p := range processes {
+		Expect(p.Close()).NotTo(HaveOccurred())
+	}
+	processes = nil
 })
 
 func TestGinkgoSuite(t *testing.T) {
@@ -308,7 +309,10 @@ func startRedis(port string, args ...string) (*redisProcess, error) {
 		process.Kill()
 		return nil, err
 	}
-	return &redisProcess{process, client}, err
+
+	p := &redisProcess{process, client}
+	registerProcess(port, p)
+	return p, err
 }
 
 func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
@@ -316,15 +320,18 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
 	if err != nil {
 		return nil, err
 	}
+
 	client, err := connectTo(port)
 	if err != nil {
 		process.Kill()
 		return nil, err
 	}
+
 	for _, cmd := range []*redis.StatusCmd{
 		redis.NewStatusCmd(ctx, "SENTINEL", "MONITOR", masterName, "127.0.0.1", masterPort, "2"),
 		redis.NewStatusCmd(ctx, "SENTINEL", "SET", masterName, "down-after-milliseconds", "500"),
@@ -337,7 +344,10 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
 			return nil, err
 		}
 	}
-	return &redisProcess{process, client}, nil
+
+	p := &redisProcess{process, client}
+	registerProcess(port, p)
+	return p, nil
 }
 
 //------------------------------------------------------------------------------

View File

@@ -1,6 +1,8 @@
 package redis_test
 
 import (
+	"net"
+
 	"github.com/go-redis/redis/v8"
 
 	. "github.com/onsi/ginkgo"
@@ -9,6 +11,8 @@ import (
 var _ = Describe("Sentinel", func() {
 	var client *redis.Client
+	var master *redis.Client
+	var masterPort string
 
 	BeforeEach(func() {
 		client = redis.NewFailoverClient(&redis.FailoverOptions{
@@ -16,10 +20,23 @@ var _ = Describe("Sentinel", func() {
 			SentinelAddrs: sentinelAddrs,
 		})
 		Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+
+		sentinel := redis.NewSentinelClient(&redis.Options{
+			Addr: ":" + sentinelPort1,
+		})
+
+		addr, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
+		Expect(err).NotTo(HaveOccurred())
+
+		master = redis.NewClient(&redis.Options{
+			Addr: net.JoinHostPort(addr[0], addr[1]),
+		})
+		masterPort = addr[1]
 	})
 
 	AfterEach(func() {
 		Expect(client.Close()).NotTo(HaveOccurred())
+		Expect(master.Close()).NotTo(HaveOccurred())
 	})
 
 	It("should facilitate failover", func() {
@@ -55,9 +72,10 @@ var _ = Describe("Sentinel", func() {
 		}, "15s", "100ms").Should(ContainSubstring("slaves=2"))
 
 		// Kill master.
-		sentinelMaster.Shutdown(ctx)
+		err = master.Shutdown(ctx).Err()
+		Expect(err).NotTo(HaveOccurred())
 		Eventually(func() error {
-			return sentinelMaster.Ping(ctx).Err()
+			return master.Ping(ctx).Err()
 		}, "15s", "100ms").Should(HaveOccurred())
 
 		// Wait for Redis sentinel to elect new master.
@@ -79,8 +97,7 @@ var _ = Describe("Sentinel", func() {
 		Expect(msg.Channel).To(Equal("foo"))
 		Expect(msg.Payload).To(Equal("hello"))
 
-		Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
-		sentinelMaster, err = startRedis(sentinelMasterPort)
+		_, err = startRedis(masterPort)
 		Expect(err).NotTo(HaveOccurred())
 	})
@@ -99,6 +116,8 @@ var _ = Describe("Sentinel", func() {
 var _ = Describe("NewFailoverClusterClient", func() {
 	var client *redis.ClusterClient
+	var master *redis.Client
+	var masterPort string
 
 	BeforeEach(func() {
 		client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
@@ -106,14 +125,27 @@ var _ = Describe("NewFailoverClusterClient", func() {
 			SentinelAddrs: sentinelAddrs,
 		})
 		Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
+
+		sentinel := redis.NewSentinelClient(&redis.Options{
+			Addr: ":" + sentinelPort1,
+		})
+
+		addr, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
+		Expect(err).NotTo(HaveOccurred())
+
+		master = redis.NewClient(&redis.Options{
+			Addr: net.JoinHostPort(addr[0], addr[1]),
+		})
+		masterPort = addr[1]
 	})
 
 	AfterEach(func() {
 		Expect(client.Close()).NotTo(HaveOccurred())
+		Expect(master.Close()).NotTo(HaveOccurred())
 	})
 
 	It("should facilitate failover", func() {
-		// Set value on master.
+		// Set value.
 		err := client.Set(ctx, "foo", "master", 0).Err()
 		Expect(err).NotTo(HaveOccurred())
@@ -145,7 +177,8 @@ var _ = Describe("NewFailoverClusterClient", func() {
 		}, "15s", "100ms").Should(ContainSubstring("slaves=2"))
 
 		// Kill master.
-		sentinelMaster.Shutdown(ctx)
+		err = master.Shutdown(ctx).Err()
+		Expect(err).NotTo(HaveOccurred())
 		Eventually(func() error {
 			return sentinelMaster.Ping(ctx).Err()
 		}, "15s", "100ms").Should(HaveOccurred())
@@ -169,8 +202,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
 		Expect(msg.Channel).To(Equal("foo"))
 		Expect(msg.Payload).To(Equal("hello"))
 
-		Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
-		sentinelMaster, err = startRedis(sentinelMasterPort)
+		_, err = startRedis(masterPort)
 		Expect(err).NotTo(HaveOccurred())
 	})
 })