package redis_test

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/redis/go-redis/v9"
)

var (
	ctx = context.Background()
	rdb *redis.Client
)

func init() {
	rdb = redis.NewClient(&redis.Options{
		Addr:         ":6379",
		DialTimeout:  10 * time.Second,
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
		PoolSize:     10,
		PoolTimeout:  30 * time.Second,
	})
}

func ExampleNewClient() {
	rdb := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379", // use default Addr
		Password: "",               // no password set
		DB:       0,                // use default DB
	})

	pong, err := rdb.Ping(ctx).Result()
	fmt.Println(pong, err)
	// Output: PONG <nil>
}

func ExampleParseURL() {
	opt, err := redis.ParseURL("redis://:qwerty@localhost:6379/1?dial_timeout=5s")
	if err != nil {
		panic(err)
	}
	fmt.Println("addr is", opt.Addr)
	fmt.Println("db is", opt.DB)
	fmt.Println("password is", opt.Password)
	fmt.Println("dial timeout is", opt.DialTimeout)

	// Create a client as usual.
	_ = redis.NewClient(opt)

	// Output: addr is localhost:6379
	// db is 1
	// password is qwerty
	// dial timeout is 5s
}
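
// ParseURL also accepts the rediss:// scheme, which enables TLS on the returned
// Options (TLSConfig is populated). This is an illustrative sketch only: the
// credentials and host below are placeholders, and without an Output comment it
// is compiled but not run.
func ExampleParseURL_tls() {
	opt, err := redis.ParseURL("rediss://user:password@example.com:6380/0")
	if err != nil {
		panic(err)
	}
	fmt.Println(opt.TLSConfig != nil)
}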

func ExampleNewFailoverClient() {
	// See http://redis.io/topics/sentinel for instructions on how to
	// set up Redis Sentinel.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "master",
		SentinelAddrs: []string{":26379"},
	})
	rdb.Ping(ctx)
}

func ExampleNewClusterClient() {
	// See http://redis.io/topics/cluster-tutorial for instructions
	// on how to set up Redis Cluster.
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
	})
	rdb.Ping(ctx)
}

// The following example creates a cluster client from 2 master nodes and 2 slave
// nodes without using cluster mode or Redis Sentinel.
func ExampleNewClusterClient_manualSetup() {
	// clusterSlots returns cluster slots information.
	// It can use a service like ZooKeeper to maintain configuration information
	// and Cluster.ReloadState to manually trigger state reloading.
	clusterSlots := func(ctx context.Context) ([]redis.ClusterSlot, error) {
		slots := []redis.ClusterSlot{
			// First node with 1 master and 1 slave.
			{
				Start: 0,
				End:   8191,
				Nodes: []redis.ClusterNode{{
					Addr: ":7000", // master
				}, {
					Addr: ":8000", // 1st slave
				}},
			},
			// Second node with 1 master and 1 slave.
			{
				Start: 8192,
				End:   16383,
				Nodes: []redis.ClusterNode{{
					Addr: ":7001", // master
				}, {
					Addr: ":8001", // 1st slave
				}},
			},
		}
		return slots, nil
	}

	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		ClusterSlots:  clusterSlots,
		RouteRandomly: true,
	})
	rdb.Ping(ctx)

	// ReloadState reloads cluster state. It calls ClusterSlots func
	// to get cluster slots information.
	rdb.ReloadState(ctx)
}

func ExampleNewRing() {
	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": ":7000",
			"shard2": ":7001",
			"shard3": ":7002",
		},
	})
	rdb.Ping(ctx)
}

func ExampleClient() {
	err := rdb.Set(ctx, "key", "value", 0).Err()
	if err != nil {
		panic(err)
	}

	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key", val)

	val2, err := rdb.Get(ctx, "missing_key").Result()
	if err == redis.Nil {
		fmt.Println("missing_key does not exist")
	} else if err != nil {
		panic(err)
	} else {
		fmt.Println("missing_key", val2)
	}
	// Output: key value
	// missing_key does not exist
}
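
// redis.Nil is an ordinary sentinel error, so the comparison above can also be
// written with errors.Is. A minimal sketch of the equivalent check (compile-only,
// no Output comment):
func ExampleClient_errorsIs() {
	_, err := rdb.Get(ctx, "missing_key").Result()
	if errors.Is(err, redis.Nil) {
		fmt.Println("missing_key does not exist")
	}
}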

func ExampleConn_name() {
	conn := rdb.Conn()

	err := conn.ClientSetName(ctx, "foobar").Err()
	if err != nil {
		panic(err)
	}

	// Open other connections.
	for i := 0; i < 10; i++ {
		go rdb.Ping(ctx)
	}

	s, err := conn.ClientGetName(ctx).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
	// Output: foobar
}

func ExampleConn_client_setInfo_libraryVersion() {
	conn := rdb.Conn()

	err := conn.ClientSetInfo(ctx, redis.WithLibraryVersion("1.2.3")).Err()
	if err != nil {
		panic(err)
	}

	// Open other connections.
	for i := 0; i < 10; i++ {
		go rdb.Ping(ctx)
	}

	s, err := conn.ClientInfo(ctx).Result()
	if err != nil {
		panic(err)
	}

	fmt.Println(s.LibVer)
	// Output: 1.2.3
}

func ExampleClient_Set() {
	// Last argument is expiration. Zero means the key has no
	// expiration time.
	err := rdb.Set(ctx, "key", "value", 0).Err()
	if err != nil {
		panic(err)
	}

	// key2 will expire in an hour.
	err = rdb.Set(ctx, "key2", "value", time.Hour).Err()
	if err != nil {
		panic(err)
	}
}
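
// Two related write variants, shown as a hedged sketch (the key names are
// illustrative and there is no Output check): SetNX writes the key only when it
// does not exist yet, and redis.KeepTTL overwrites a value while preserving the
// key's remaining TTL.
func ExampleClient_SetNX() {
	// ok reports whether the write actually happened.
	ok, err := rdb.SetNX(ctx, "lock", "holder-1", 10*time.Second).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(ok)

	// Replace the value but keep whatever TTL the key already has.
	if err := rdb.Set(ctx, "lock", "holder-2", redis.KeepTTL).Err(); err != nil {
		panic(err)
	}
}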

func ExampleClient_SetEx() {
	err := rdb.SetEx(ctx, "key", "value", time.Hour).Err()
	if err != nil {
		panic(err)
	}
}

func ExampleClient_HSet() {
	// Use the "redis" struct tag to map struct fields to hash fields.
	type ExampleUser struct {
		Name string `redis:"name"`
		Age  int    `redis:"age"`
	}

	items := ExampleUser{"jane", 22}

	err := rdb.HSet(ctx, "user:1", items).Err()
	if err != nil {
		panic(err)
	}
}
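
// Reading the hash back: HGetAll returns a *redis.MapStringStringCmd whose Scan
// fills the same redis-tagged fields (see ExampleMapStringStringCmd_Scan below).
// A minimal sketch, compile-only:
func ExampleClient_HSet_scanBack() {
	type ExampleUser struct {
		Name string `redis:"name"`
		Age  int    `redis:"age"`
	}

	var u ExampleUser
	if err := rdb.HGetAll(ctx, "user:1").Scan(&u); err != nil {
		panic(err)
	}
	fmt.Println(u.Name, u.Age)
}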

func ExampleClient_Incr() {
	result, err := rdb.Incr(ctx, "counter").Result()
	if err != nil {
		panic(err)
	}

	fmt.Println(result)
	// Output: 1
}
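
// Incr is often combined with Expire to build a simple fixed-window counter.
// The key name and window below are assumptions made for illustration; the
// sketch is compile-only.
func ExampleClient_Incr_withExpiry() {
	n, err := rdb.Incr(ctx, "requests:minute").Result()
	if err != nil {
		panic(err)
	}
	if n == 1 {
		// First hit in this window: start the 60 second countdown.
		rdb.Expire(ctx, "requests:minute", time.Minute)
	}
	fmt.Println(n)
}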

func ExampleClient_BLPop() {
	if err := rdb.RPush(ctx, "queue", "message").Err(); err != nil {
		panic(err)
	}

	// Use `rdb.BLPop(ctx, 0, "queue")` to wait indefinitely.
	result, err := rdb.BLPop(ctx, 1*time.Second, "queue").Result()
	if err != nil {
		panic(err)
	}

	fmt.Println(result[0], result[1])
	// Output: queue message
}

func ExampleClient_Scan() {
	rdb.FlushDB(ctx)
	for i := 0; i < 33; i++ {
		err := rdb.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
		if err != nil {
			panic(err)
		}
	}

	var cursor uint64
	var n int
	for {
		var keys []string
		var err error
		keys, cursor, err = rdb.Scan(ctx, cursor, "key*", 10).Result()
		if err != nil {
			panic(err)
		}
		n += len(keys)
		if cursor == 0 {
			break
		}
	}

	fmt.Printf("found %d keys\n", n)
	// Output: found 33 keys
}

func ExampleClient_ScanType() {
	rdb.FlushDB(ctx)
	for i := 0; i < 33; i++ {
		err := rdb.Set(ctx, fmt.Sprintf("key%d", i), "value", 0).Err()
		if err != nil {
			panic(err)
		}
	}

	var cursor uint64
	var n int
	for {
		var keys []string
		var err error
		keys, cursor, err = rdb.ScanType(ctx, cursor, "key*", 10, "string").Result()
		if err != nil {
			panic(err)
		}
		n += len(keys)
		if cursor == 0 {
			break
		}
	}

	fmt.Printf("found %d keys\n", n)
	// Output: found 33 keys
}

// ExampleClient_ScanType_hashType uses the keyType "hash".
func ExampleClient_ScanType_hashType() {
	rdb.FlushDB(ctx)
	for i := 0; i < 33; i++ {
		err := rdb.HSet(context.TODO(), fmt.Sprintf("key%d", i), "value", "foo").Err()
		if err != nil {
			panic(err)
		}
	}

	var allKeys []string
	var cursor uint64
	var err error

	for {
		var keysFromScan []string
		keysFromScan, cursor, err = rdb.ScanType(context.TODO(), cursor, "key*", 10, "hash").Result()
		if err != nil {
			panic(err)
		}
		allKeys = append(allKeys, keysFromScan...)
		if cursor == 0 {
			break
		}
	}
	fmt.Printf("%d keys ready for use", len(allKeys))
	// Output: 33 keys ready for use
}

type Income struct {
	Min float64 `json:"min"`
	Max float64 `json:"max"`
}

func (p *Income) MarshalBinary() (data []byte, err error) {
	return json.Marshal(*p)
}

func (p *Income) UnmarshalBinary(data []byte) error {
	val := Income{}
	if err := json.Unmarshal(data, &val); err != nil {
		return err
	}
	*p = val
	return nil
}

// ExampleMapStringStringCmd_Scan shows how to scan the results of a map fetch
// into a struct.
func ExampleMapStringStringCmd_Scan() {
	rdb.FlushDB(ctx)
	err := rdb.HMSet(ctx, "map",
		"name", "hello",
		"count", 123,
		"correct", true,
		"income", &Income{Max: 10, Min: 1},
	).Err()
	if err != nil {
		panic(err)
	}

	// Get the map. The same approach works for HMGet().
	res := rdb.HGetAll(ctx, "map")
	if res.Err() != nil {
		panic(res.Err())
	}

	type data struct {
		Name    string  `redis:"name"`
		Count   int     `redis:"count"`
		Correct bool    `redis:"correct"`
		Income  *Income `redis:"income"`
	}

	// Scan the results into the struct.
	var d data
	if err := res.Scan(&d); err != nil {
		panic(err)
	}

	fmt.Printf("name: %s count: %d correct: %v income: {max: %.0f, min: %.0f}",
		d.Name, d.Count, d.Correct, d.Income.Max, d.Income.Min)
	// Output: name: hello count: 123 correct: true income: {max: 10, min: 1}
}

// ExampleSliceCmd_Scan shows how to scan the results of a multi key fetch
// into a struct.
func ExampleSliceCmd_Scan() {
	rdb.FlushDB(ctx)
	err := rdb.MSet(ctx,
		"name", "hello",
		"count", 123,
		"correct", true).Err()
	if err != nil {
		panic(err)
	}

	res := rdb.MGet(ctx, "name", "count", "empty", "correct")
	if res.Err() != nil {
		panic(res.Err())
	}

	type data struct {
		Name    string `redis:"name"`
		Count   int    `redis:"count"`
		Correct bool   `redis:"correct"`
	}

	// Scan the results into the struct.
	var d data
	if err := res.Scan(&d); err != nil {
		panic(err)
	}

	fmt.Println(d)
	// Output: {hello 123 true}
}

func ExampleClient_Pipelined() {
	var incr *redis.IntCmd
	_, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		incr = pipe.Incr(ctx, "pipelined_counter")
		pipe.Expire(ctx, "pipelined_counter", time.Hour)
		return nil
	})
	fmt.Println(incr.Val(), err)
	// Output: 1 <nil>
}

func ExampleClient_Pipeline() {
	pipe := rdb.Pipeline()

	incr := pipe.Incr(ctx, "pipeline_counter")
	pipe.Expire(ctx, "pipeline_counter", time.Hour)

	// Execute
	//
	//     INCR pipeline_counter
	//     EXPIRE pipeline_counter 3600
	//
	// using one redis-server roundtrip.
	_, err := pipe.Exec(ctx)
	fmt.Println(incr.Val(), err)
	// Output: 1 <nil>
}

func ExampleClient_TxPipelined() {
	var incr *redis.IntCmd
	_, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		incr = pipe.Incr(ctx, "tx_pipelined_counter")
		pipe.Expire(ctx, "tx_pipelined_counter", time.Hour)
		return nil
	})
	fmt.Println(incr.Val(), err)
	// Output: 1 <nil>
}

func ExampleClient_TxPipeline() {
	pipe := rdb.TxPipeline()

	incr := pipe.Incr(ctx, "tx_pipeline_counter")
	pipe.Expire(ctx, "tx_pipeline_counter", time.Hour)

	// Execute
	//
	//     MULTI
	//     INCR tx_pipeline_counter
	//     EXPIRE tx_pipeline_counter 3600
	//     EXEC
	//
	// using one redis-server roundtrip.
	_, err := pipe.Exec(ctx)
	fmt.Println(incr.Val(), err)
	// Output: 1 <nil>
}

func ExampleClient_Watch() {
	const maxRetries = 10000

	// Increment transactionally increments key using GET and SET commands.
	increment := func(key string) error {
		// Transactional function.
		txf := func(tx *redis.Tx) error {
			// Get current value or zero.
			n, err := tx.Get(ctx, key).Int()
			if err != nil && err != redis.Nil {
				return err
			}

			// Actual operation (local in optimistic lock).
			n++

			// Operation is committed only if the watched keys remain unchanged.
			_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
				pipe.Set(ctx, key, n, 0)
				return nil
			})
			return err
		}

		for i := 0; i < maxRetries; i++ {
			err := rdb.Watch(ctx, txf, key)
			if err == nil {
				// Success.
				return nil
			}
			if err == redis.TxFailedErr {
				// Optimistic lock lost. Retry.
				continue
			}
			// Return any other error.
			return err
		}

		return errors.New("increment reached maximum number of retries")
	}

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			if err := increment("counter3"); err != nil {
				fmt.Println("increment error:", err)
			}
		}()
	}
	wg.Wait()

	n, err := rdb.Get(ctx, "counter3").Int()
	fmt.Println("ended with", n, err)
	// Output: ended with 100 <nil>
}

func ExamplePubSub() {
	pubsub := rdb.Subscribe(ctx, "mychannel1")

	// Wait for confirmation that the subscription is created before publishing anything.
	_, err := pubsub.Receive(ctx)
	if err != nil {
		panic(err)
	}

	// Go channel which receives messages.
	ch := pubsub.Channel()

	// Publish a message.
	err = rdb.Publish(ctx, "mychannel1", "hello").Err()
	if err != nil {
		panic(err)
	}

	time.AfterFunc(time.Second, func() {
		// When pubsub is closed, the channel is closed too.
		_ = pubsub.Close()
	})

	// Consume messages.
	for msg := range ch {
		fmt.Println(msg.Channel, msg.Payload)
	}

	// Output: mychannel1 hello
}
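
// PSubscribe is the glob-pattern variant of Subscribe; messages published to any
// channel matching the pattern arrive on the same Go channel. The pattern below
// is an assumption made for illustration, and the sketch is compile-only.
func ExamplePubSub_patterns() {
	pubsub := rdb.PSubscribe(ctx, "mychannel*")
	defer pubsub.Close()

	for msg := range pubsub.Channel() {
		fmt.Println(msg.Channel, msg.Payload)
	}
}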

func ExamplePubSub_Receive() {
	pubsub := rdb.Subscribe(ctx, "mychannel2")
	defer pubsub.Close()

	for i := 0; i < 2; i++ {
		// ReceiveTimeout is a low-level API. Use ReceiveMessage instead.
		msgi, err := pubsub.ReceiveTimeout(ctx, time.Second)
		if err != nil {
			break
		}

		switch msg := msgi.(type) {
		case *redis.Subscription:
			fmt.Println("subscribed to", msg.Channel)

			_, err := rdb.Publish(ctx, "mychannel2", "hello").Result()
			if err != nil {
				panic(err)
			}
		case *redis.Message:
			fmt.Println("received", msg.Payload, "from", msg.Channel)
		default:
			panic("unreached")
		}
	}

	// Prints:
	// subscribed to mychannel2
	// received hello from mychannel2
}

func ExampleScript() {
	IncrByXX := redis.NewScript(`
		if redis.call("GET", KEYS[1]) ~= false then
			return redis.call("INCRBY", KEYS[1], ARGV[1])
		end
		return false
	`)

	n, err := IncrByXX.Run(ctx, rdb, []string{"xx_counter"}, 2).Result()
	fmt.Println(n, err)

	err = rdb.Set(ctx, "xx_counter", "40", 0).Err()
	if err != nil {
		panic(err)
	}

	n, err = IncrByXX.Run(ctx, rdb, []string{"xx_counter"}, 2).Result()
	fmt.Println(n, err)

	// Output: <nil> redis: nil
	// 42 <nil>
}
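
// Script.Run first tries EVALSHA and falls back to EVAL when the script is not
// yet cached on the server, so no explicit SCRIPT LOAD is needed. A minimal
// keyless sketch (the script body is illustrative, compile-only):
func ExampleScript_keyless() {
	sum := redis.NewScript(`return tonumber(ARGV[1]) + tonumber(ARGV[2])`)

	n, err := sum.Run(ctx, rdb, nil, 2, 3).Int64()
	fmt.Println(n, err)
}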

func Example_customCommand() {
	Get := func(ctx context.Context, rdb *redis.Client, key string) *redis.StringCmd {
		cmd := redis.NewStringCmd(ctx, "get", key)
		rdb.Process(ctx, cmd)
		return cmd
	}

	v, err := Get(ctx, rdb, "key_does_not_exist").Result()
	fmt.Printf("%q %s", v, err)
	// Output: "" redis: nil
}

func Example_customCommand2() {
	v, err := rdb.Do(ctx, "get", "key_does_not_exist").Text()
	fmt.Printf("%q %s", v, err)
	// Output: "" redis: nil
}
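
// The generic *redis.Cmd returned by Do also has typed helpers such as Int64,
// Bool and Slice. A hedged sketch using DBSIZE, which replies with an integer
// (compile-only, no Output check):
func Example_customCommand3() {
	n, err := rdb.Do(ctx, "dbsize").Int64()
	fmt.Println(n, err)
}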

func ExampleScanIterator() {
	iter := rdb.Scan(ctx, 0, "", 0).Iterator()
	for iter.Next(ctx) {
		fmt.Println(iter.Val())
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}
}

func ExampleScanCmd_Iterator() {
	iter := rdb.Scan(ctx, 0, "", 0).Iterator()
	for iter.Next(ctx) {
		fmt.Println(iter.Val())
	}
	if err := iter.Err(); err != nil {
		panic(err)
	}
}

func ExampleNewUniversalClient_simple() {
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{":6379"},
	})
	defer rdb.Close()

	rdb.Ping(ctx)
}

func ExampleNewUniversalClient_failover() {
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
		MasterName: "master",
		Addrs:      []string{":26379"},
	})
	defer rdb.Close()

	rdb.Ping(ctx)
}

func ExampleNewUniversalClient_cluster() {
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{":7000", ":7001", ":7002", ":7003", ":7004", ":7005"},
	})
	defer rdb.Close()

	rdb.Ping(ctx)
}

func ExampleClient_SlowLogGet() {
	if RECluster {
		// skip slowlog test for cluster
		fmt.Println(2)
		return
	}
	const key = "slowlog-log-slower-than"

	old := rdb.ConfigGet(ctx, key).Val()
	rdb.ConfigSet(ctx, key, "0")
	defer rdb.ConfigSet(ctx, key, old[key])

	if err := rdb.Do(ctx, "slowlog", "reset").Err(); err != nil {
		panic(err)
	}

	rdb.Set(ctx, "test", "true", 0)

	result, err := rdb.SlowLogGet(ctx, -1).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(result))
	// Output: 2
}