Fix build

This commit is contained in:
Vladimir Mihailenco 2020-09-09 17:39:13 +03:00
parent 4877841b11
commit 457cdea58f
5 changed files with 79 additions and 43 deletions

View File

@ -259,7 +259,7 @@ func BenchmarkClusterPing(b *testing.B) {
if err := startCluster(ctx, cluster); err != nil {
b.Fatal(err)
}
defer stopCluster(cluster)
defer cluster.Close()
client := cluster.newClusterClient(ctx, redisClusterOptions())
defer client.Close()
@ -286,7 +286,7 @@ func BenchmarkClusterSetString(b *testing.B) {
if err := startCluster(ctx, cluster); err != nil {
b.Fatal(err)
}
defer stopCluster(cluster)
defer cluster.Close()
client := cluster.newClusterClient(ctx, redisClusterOptions())
defer client.Close()
@ -315,7 +315,7 @@ func BenchmarkClusterReloadState(b *testing.B) {
if err := startCluster(ctx, cluster); err != nil {
b.Fatal(err)
}
defer stopCluster(cluster)
defer cluster.Close()
client := cluster.newClusterClient(ctx, redisClusterOptions())
defer client.Close()

View File

@ -80,6 +80,14 @@ func (s *clusterScenario) newClusterClient(
return client
}
// Close shuts down every redis process belonging to this cluster
// scenario and removes it from the global `processes` registry so the
// AfterSuite sweep does not close it a second time. Errors from the
// individual process Close calls are discarded and the method always
// returns nil. NOTE(review): assumes each port in s.ports was
// registered via registerProcess; a missing entry yields a nil map
// lookup — confirm startCluster always ran first.
func (s *clusterScenario) Close() error {
for _, port := range s.ports {
processes[port].Close()
delete(processes, port)
}
return nil
}
func startCluster(ctx context.Context, scenario *clusterScenario) error {
// Start processes and collect node ids
for pos, port := range scenario.ports {
@ -221,20 +229,6 @@ func slotEqual(s1, s2 redis.ClusterSlot) bool {
return true
}
// stopCluster tears down a running cluster scenario: it closes every
// client first, then every redis process, returning the first Close
// error encountered (remaining clients/processes are left open on
// error). Deleted by this commit in favour of clusterScenario.Close,
// which relies on the global process registry instead.
func stopCluster(scenario *clusterScenario) error {
for _, client := range scenario.clients {
if err := client.Close(); err != nil {
return err
}
}
for _, process := range scenario.processes {
if err := process.Close(); err != nil {
return err
}
}
return nil
}
//------------------------------------------------------------------------------
var _ = Describe("ClusterClient", func() {

View File

@ -4029,7 +4029,7 @@ var _ = Describe("Commands", func() {
result, err := client.SlowLog(ctx, -1).Result()
Expect(err).NotTo(HaveOccurred())
Expect(len(result)).To(Equal(2))
Expect(len(result)).NotTo(BeZero())
})
})
})

View File

@ -41,12 +41,14 @@ const (
)
var (
sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
processes map[string]*redisProcess
redisMain *redisProcess
ringShard1, ringShard2, ringShard3 *redisProcess
sentinelMaster, sentinelSlave1, sentinelSlave2 *redisProcess
sentinel1, sentinel2, sentinel3 *redisProcess
sentinelAddrs = []string{":" + sentinelPort1, ":" + sentinelPort2, ":" + sentinelPort3}
)
var cluster = &clusterScenario{
@ -56,6 +58,13 @@ var cluster = &clusterScenario{
clients: make(map[string]*redis.Client, 6),
}
// registerProcess records a started redis process in the package-level
// `processes` map, keyed by its port, lazily allocating the map on
// first use. Registered processes are closed in bulk by AfterSuite.
func registerProcess(port string, p *redisProcess) {
if processes == nil {
processes = make(map[string]*redisProcess)
}
processes[port] = p
}
var _ = BeforeSuite(func() {
var err error
@ -95,20 +104,12 @@ var _ = BeforeSuite(func() {
})
var _ = AfterSuite(func() {
Expect(redisMain.Close()).NotTo(HaveOccurred())
Expect(cluster.Close()).NotTo(HaveOccurred())
Expect(ringShard1.Close()).NotTo(HaveOccurred())
Expect(ringShard2.Close()).NotTo(HaveOccurred())
Expect(ringShard3.Close()).NotTo(HaveOccurred())
Expect(sentinel1.Close()).NotTo(HaveOccurred())
Expect(sentinel2.Close()).NotTo(HaveOccurred())
Expect(sentinel3.Close()).NotTo(HaveOccurred())
Expect(sentinelSlave1.Close()).NotTo(HaveOccurred())
Expect(sentinelSlave2.Close()).NotTo(HaveOccurred())
Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
Expect(stopCluster(cluster)).NotTo(HaveOccurred())
for _, p := range processes {
Expect(p.Close()).NotTo(HaveOccurred())
}
processes = nil
})
func TestGinkgoSuite(t *testing.T) {
@ -308,7 +309,10 @@ func startRedis(port string, args ...string) (*redisProcess, error) {
process.Kill()
return nil, err
}
return &redisProcess{process, client}, err
p := &redisProcess{process, client}
registerProcess(port, p)
return p, err
}
func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
@ -316,15 +320,18 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
if err != nil {
return nil, err
}
process, err := execCmd(redisServerBin, os.DevNull, "--sentinel", "--port", port, "--dir", dir)
if err != nil {
return nil, err
}
client, err := connectTo(port)
if err != nil {
process.Kill()
return nil, err
}
for _, cmd := range []*redis.StatusCmd{
redis.NewStatusCmd(ctx, "SENTINEL", "MONITOR", masterName, "127.0.0.1", masterPort, "2"),
redis.NewStatusCmd(ctx, "SENTINEL", "SET", masterName, "down-after-milliseconds", "500"),
@ -337,7 +344,10 @@ func startSentinel(port, masterName, masterPort string) (*redisProcess, error) {
return nil, err
}
}
return &redisProcess{process, client}, nil
p := &redisProcess{process, client}
registerProcess(port, p)
return p, nil
}
//------------------------------------------------------------------------------

View File

@ -1,6 +1,8 @@
package redis_test
import (
"net"
"github.com/go-redis/redis/v8"
. "github.com/onsi/ginkgo"
@ -9,6 +11,8 @@ import (
var _ = Describe("Sentinel", func() {
var client *redis.Client
var master *redis.Client
var masterPort string
BeforeEach(func() {
client = redis.NewFailoverClient(&redis.FailoverOptions{
@ -16,10 +20,23 @@ var _ = Describe("Sentinel", func() {
SentinelAddrs: sentinelAddrs,
})
Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
sentinel := redis.NewSentinelClient(&redis.Options{
Addr: ":" + sentinelPort1,
})
addr, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
Expect(err).NotTo(HaveOccurred())
master = redis.NewClient(&redis.Options{
Addr: net.JoinHostPort(addr[0], addr[1]),
})
masterPort = addr[1]
})
AfterEach(func() {
Expect(client.Close()).NotTo(HaveOccurred())
Expect(master.Close()).NotTo(HaveOccurred())
})
It("should facilitate failover", func() {
@ -55,9 +72,10 @@ var _ = Describe("Sentinel", func() {
}, "15s", "100ms").Should(ContainSubstring("slaves=2"))
// Kill master.
sentinelMaster.Shutdown(ctx)
err = master.Shutdown(ctx).Err()
Expect(err).NotTo(HaveOccurred())
Eventually(func() error {
return sentinelMaster.Ping(ctx).Err()
return master.Ping(ctx).Err()
}, "15s", "100ms").Should(HaveOccurred())
// Wait for Redis sentinel to elect new master.
@ -79,8 +97,7 @@ var _ = Describe("Sentinel", func() {
Expect(msg.Channel).To(Equal("foo"))
Expect(msg.Payload).To(Equal("hello"))
Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
sentinelMaster, err = startRedis(sentinelMasterPort)
_, err = startRedis(masterPort)
Expect(err).NotTo(HaveOccurred())
})
@ -99,6 +116,8 @@ var _ = Describe("Sentinel", func() {
var _ = Describe("NewFailoverClusterClient", func() {
var client *redis.ClusterClient
var master *redis.Client
var masterPort string
BeforeEach(func() {
client = redis.NewFailoverClusterClient(&redis.FailoverOptions{
@ -106,14 +125,27 @@ var _ = Describe("NewFailoverClusterClient", func() {
SentinelAddrs: sentinelAddrs,
})
Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred())
sentinel := redis.NewSentinelClient(&redis.Options{
Addr: ":" + sentinelPort1,
})
addr, err := sentinel.GetMasterAddrByName(ctx, sentinelName).Result()
Expect(err).NotTo(HaveOccurred())
master = redis.NewClient(&redis.Options{
Addr: net.JoinHostPort(addr[0], addr[1]),
})
masterPort = addr[1]
})
AfterEach(func() {
Expect(client.Close()).NotTo(HaveOccurred())
Expect(master.Close()).NotTo(HaveOccurred())
})
It("should facilitate failover", func() {
// Set value on master.
// Set value.
err := client.Set(ctx, "foo", "master", 0).Err()
Expect(err).NotTo(HaveOccurred())
@ -145,7 +177,8 @@ var _ = Describe("NewFailoverClusterClient", func() {
}, "15s", "100ms").Should(ContainSubstring("slaves=2"))
// Kill master.
sentinelMaster.Shutdown(ctx)
err = master.Shutdown(ctx).Err()
Expect(err).NotTo(HaveOccurred())
Eventually(func() error {
return sentinelMaster.Ping(ctx).Err()
}, "15s", "100ms").Should(HaveOccurred())
@ -169,8 +202,7 @@ var _ = Describe("NewFailoverClusterClient", func() {
Expect(msg.Channel).To(Equal("foo"))
Expect(msg.Payload).To(Equal("hello"))
Expect(sentinelMaster.Close()).NotTo(HaveOccurred())
sentinelMaster, err = startRedis(sentinelMasterPort)
_, err = startRedis(masterPort)
Expect(err).NotTo(HaveOccurred())
})
})