mirror of https://github.com/go-redis/redis.git
Merge pull request #192 from go-redis/fix/doc-pool-timeout
Fix doc comment for PoolTimeout.
commit b162372a68
.travis.yml
@@ -1,13 +1,18 @@
-language: go
 sudo: false
+language: go
 
 services:
 - redis-server
 
 go:
   - 1.3
   - 1.4
   - 1.5
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
 
 install:
   - go get gopkg.in/bsm/ratelimit.v1
Makefile (6 lines changed)
@@ -1,9 +1,7 @@
 all: testdeps
 	go test ./... -test.v -test.cpu=1,2,4
-	go test ./... -test.short -test.race
-
-test: testdeps
-	go test ./... -test.v=1
+	sleep 3 # give Redis time to exit
+	go test ./... -test.v -test.short -test.race
 
 testdeps: .test/redis/src/redis-server
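The race pass now runs with `-test.short`, so long-running cases are expected to opt out of it. Below is a minimal sketch of how a test can honor that flag via `testing.Short()`; the test name and skip message are illustrative, not from the repository.

```go
package redis_test

import "testing"

// Hypothetical example: a slow test that skips itself during the
// `go test ./... -test.v -test.short -test.race` pass from the Makefile.
func TestSlowScenario(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow scenario in -test.short mode")
	}
	// ... long-running assertions would go here ...
}
```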
cluster_test.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"math/rand"
 	"net"
+	"reflect"
 	"strings"
 	"testing"
@@ -116,12 +117,23 @@ func startCluster(scenario *clusterScenario) error {
 	// Wait until all nodes have consistent info
 	for _, client := range scenario.clients {
 		err := eventually(func() error {
-			for _, masterId := range scenario.nodeIds[:3] {
-				s := client.ClusterNodes().Val()
-				wanted := "slave " + masterId
-				if !strings.Contains(s, wanted) {
-					return fmt.Errorf("%q does not contain %q", s, wanted)
+			res, err := client.ClusterSlots().Result()
+			if err != nil {
+				return err
 			}
+			wanted := []redis.ClusterSlotInfo{
+				{0, 4999, []string{"127.0.0.1:8220", "127.0.0.1:8223"}},
+				{5000, 9999, []string{"127.0.0.1:8221", "127.0.0.1:8224"}},
+				{10000, 16383, []string{"127.0.0.1:8222", "127.0.0.1:8225"}},
+			}
+		loop:
+			for _, info := range res {
+				for _, info2 := range wanted {
+					if reflect.DeepEqual(info, info2) {
+						continue loop
+					}
+				}
+				return fmt.Errorf("cluster did not reach consistent state (%v)", res)
 			}
 			return nil
 		}, 10*time.Second)
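The rewritten check polls through the project's `eventually(fn, timeout)` helper until `CLUSTER SLOTS` reports the expected layout. The helper itself is not part of this diff; the sketch below is a hypothetical stand-in for such a retry loop, written only to illustrate the pattern.

```go
package main

import (
	"fmt"
	"time"
)

// eventually retries fn until it returns nil or the timeout expires.
// Illustrative stand-in, not the helper used in cluster_test.go.
func eventually(fn func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	var err error
	for time.Now().Before(deadline) {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
	return fmt.Errorf("condition not met within %s: %v", timeout, err)
}

func main() {
	start := time.Now()
	err := eventually(func() error {
		if time.Since(start) < 200*time.Millisecond {
			return fmt.Errorf("cluster not consistent yet")
		}
		return nil
	}, time.Second)
	fmt.Println(err) // <nil>
}
```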
commands_test.go
@@ -21,8 +21,6 @@ var _ = Describe("Commands", func() {
 	BeforeEach(func() {
 		client = redis.NewClient(&redis.Options{
 			Addr:        redisAddr,
-			ReadTimeout:  500 * time.Millisecond,
-			WriteTimeout: 500 * time.Millisecond,
 			PoolTimeout: 30 * time.Second,
 		})
 	})
@@ -81,13 +79,10 @@ var _ = Describe("Commands", func() {
 		err := client.ClientPause(time.Second).Err()
 		Expect(err).NotTo(HaveOccurred())
 
-		Consistently(func() error {
-			return client.Ping().Err()
-		}, "400ms").Should(HaveOccurred()) // pause time - read timeout
-
-		Eventually(func() error {
-			return client.Ping().Err()
-		}, "1s").ShouldNot(HaveOccurred())
+		start := time.Now()
+		err = client.Ping().Err()
+		Expect(err).NotTo(HaveOccurred())
+		Expect(time.Now()).To(BeTemporally("~", start.Add(time.Second), 800*time.Millisecond))
 	})
 
 	It("should ClientSetName and ClientGetName", func() {
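With the per-command read/write timeouts removed, the ClientPause test simply measures how long a blocking Ping takes and expects roughly one second. A sketch of the same measurement outside Ginkgo, assuming the `gopkg.in/redis.v3` import path of that era and a local server on `localhost:6379`:

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/redis.v3"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	// Pause the server for one second.
	if err := client.ClientPause(time.Second).Err(); err != nil {
		panic(err)
	}

	// Without a read timeout the Ping blocks until the pause ends, so the
	// elapsed time should land near one second.
	start := time.Now()
	if err := client.Ping().Err(); err != nil {
		panic(err)
	}
	fmt.Println("ping returned after", time.Since(start))
}
```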
example_test.go
@@ -238,6 +238,7 @@ func ExamplePubSub_Receive() {
 	}
 
 	for i := 0; i < 2; i++ {
+		// ReceiveTimeout is a low level API. Use ReceiveMessage instead.
 		msgi, err := pubsub.ReceiveTimeout(100 * time.Millisecond)
 		if err != nil {
 			panic(err)
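The added comment points users from the low-level `ReceiveTimeout` to `ReceiveMessage`, which the updated pubsub test below also uses. A hedged sketch of a subscriber built on `ReceiveMessage`; the two-value `Subscribe` call and `Close` are my recollection of the v3-era API, and the channel name is illustrative:

```go
package main

import (
	"fmt"

	"gopkg.in/redis.v3"
)

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer client.Close()

	pubsub, err := client.Subscribe("mychannel")
	if err != nil {
		panic(err)
	}
	defer pubsub.Close()

	for i := 0; i < 2; i++ {
		// ReceiveMessage is the recommended alternative to ReceiveTimeout.
		msg, err := pubsub.ReceiveMessage()
		if err != nil {
			panic(err)
		}
		fmt.Println(msg.Channel, msg.Payload)
	}
}
```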
pubsub.go
@@ -80,6 +80,9 @@ func (c *PubSub) PSubscribe(patterns ...string) error {
 }
 
 func remove(ss []string, es ...string) []string {
+	if len(es) == 0 {
+		return ss[:0]
+	}
 	for _, e := range es {
 		for i, s := range ss {
 			if s == e {
@@ -231,7 +234,9 @@ func (c *PubSub) Receive() (interface{}, error) {
 }
 
 func (c *PubSub) reconnect() {
+	// Close current connection.
 	c.connPool.Remove(nil) // nil to force removal
+
 	if len(c.channels) > 0 {
 		if err := c.Subscribe(c.channels...); err != nil {
 			log.Printf("redis: Subscribe failed: %s", err)
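The new guard makes `remove(ss)` with no arguments clear the slice, matching unsubscribe-with-no-channels semantics, instead of leaving it untouched. Below is a standalone copy of the helper for illustration; the element-removal step is a plausible completion of the lines not shown in this hunk, not the package's code verbatim.

```go
package main

import "fmt"

// remove deletes the named entries from ss; with no entries it empties ss.
func remove(ss []string, es ...string) []string {
	if len(es) == 0 {
		return ss[:0]
	}
	for _, e := range es {
		for i, s := range ss {
			if s == e {
				// Assumed completion: drop the matching element.
				ss = append(ss[:i], ss[i+1:]...)
				break
			}
		}
	}
	return ss
}

func main() {
	fmt.Println(remove([]string{"a", "b", "c"}, "b")) // [a c]
	fmt.Println(remove([]string{"a", "b", "c"}))      // []
}
```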
pubsub_test.go
@@ -2,6 +2,7 @@ package redis_test
 
 import (
 	"net"
+	"sync"
 	"time"
 
 	. "github.com/onsi/ginkgo"
@@ -261,19 +262,23 @@ var _ = Describe("PubSub", func() {
 			writeErr: errTimeout,
 		})
 
+		var wg sync.WaitGroup
+		wg.Add(1)
 		go func() {
 			defer GinkgoRecover()
+			defer wg.Done()
+
 			time.Sleep(100 * time.Millisecond)
-			n, err := client.Publish("mychannel", "hello").Result()
+			err := client.Publish("mychannel", "hello").Err()
 			Expect(err).NotTo(HaveOccurred())
-			Expect(n).To(Equal(int64(2)))
 		}()
 
 		msg, err := pubsub.ReceiveMessage()
 		Expect(err).NotTo(HaveOccurred())
 		Expect(msg.Channel).To(Equal("mychannel"))
 		Expect(msg.Payload).To(Equal("hello"))
+
+		wg.Wait()
 	})
 
 })
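The `sync.WaitGroup` additions keep the publishing goroutine (and its assertions) from outliving the test body. A minimal standalone sketch of the same synchronization pattern, with the Redis calls replaced by a print:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Stands in for the delayed Publish in the test.
		time.Sleep(100 * time.Millisecond)
		fmt.Println("published")
	}()

	// The test would block in pubsub.ReceiveMessage() here.

	wg.Wait() // make sure the goroutine finished before the test returns
}
```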
redis.go (2 lines changed)
@@ -121,7 +121,7 @@ type Options struct {
 	PoolSize int
 	// Specifies amount of time client waits for connection if all
 	// connections are busy before returning an error.
-	// Default is 5 seconds.
+	// Default is 1 seconds.
 	PoolTimeout time.Duration
 	// Specifies amount of time after which client closes idle
 	// connections. Should be less than server's timeout.
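`PoolTimeout` bounds how long a command waits for a free connection once all `PoolSize` connections are busy. A sketch of setting both fields explicitly, using only options that appear in this diff; the address and values are illustrative and the import path assumes the v3 era:

```go
package main

import (
	"time"

	"gopkg.in/redis.v3"
)

func main() {
	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		PoolSize: 10,
		// Wait up to 30s for a free connection when the pool is exhausted;
		// the documented default is now 1 second.
		PoolTimeout: 30 * time.Second,
	})
	defer client.Close()

	if err := client.Ping().Err(); err != nil {
		panic(err)
	}
}
```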
ring.go (2 lines changed)
@@ -95,7 +95,7 @@ func (shard *ringShard) Vote(up bool) bool {
 // keys across multiple Redis servers (shards). It's safe for
 // concurrent use by multiple goroutines.
 //
-// It monitors the state of each shard and removes dead shards from
+// Ring monitors the state of each shard and removes dead shards from
 // the ring. When shard comes online it is added back to the ring. This
 // gives you maximum availability and partition tolerance, but no
 // consistency between different shards or even clients. Each client
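The reworded comment describes `Ring`: client-side sharding by consistent hashing, with dead shards dropped from the ring and re-added when they recover, at the cost of cross-shard consistency. A hedged sketch of creating a ring, assuming the v3-era `RingOptions` with a shard-name to address map; names and ports are illustrative:

```go
package main

import "gopkg.in/redis.v3"

func main() {
	ring := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:7000",
			"shard2": "localhost:7001",
		},
	})
	defer ring.Close()

	// Keys are distributed across shards by consistent hashing.
	if err := ring.Ping().Err(); err != nil {
		panic(err)
	}
}
```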
ring_test.go
@@ -56,7 +56,7 @@ var _ = Describe("Redis ring", func() {
 		// Ring needs 5 * heartbeat time to detect that node is down.
 		// Give it more to be sure.
 		heartbeat := 100 * time.Millisecond
-		time.Sleep(5*heartbeat + heartbeat)
+		time.Sleep(2 * 5 * heartbeat)
 
 		setRingKeys()
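The sleep is now spelled as `2 * 5 * heartbeat`: five heartbeats for the ring to notice a dead shard, doubled for a safety margin. A quick check of the arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	heartbeat := 100 * time.Millisecond
	detect := 5 * heartbeat // ring needs ~5 heartbeats to mark a shard down
	wait := 2 * detect      // doubled to be safe, as in the test
	fmt.Println(wait)       // 1s
}
```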