package redis_test

import (
	"bytes"
	"context"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"git.internal/re/redis/v8"
)
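
// benchmarkRedisClient returns a Client connected to the local Redis server
// at :6379 with the given pool size, flushing the current database first.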
func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
	client := redis.NewClient(&redis.Options{
		Addr:         ":6379",
		DialTimeout:  time.Second,
		ReadTimeout:  time.Second,
		WriteTimeout: time.Second,
		PoolSize:     poolSize,
	})
	if err := client.FlushDB(ctx).Err(); err != nil {
		panic(err)
	}
	return client
}
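
// BenchmarkRedisPing measures the round-trip latency of PING issued from
// parallel goroutines.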
func BenchmarkRedisPing(b *testing.B) {
	ctx := context.Background()
	rdb := benchmarkRedisClient(ctx, 10)
	defer rdb.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := rdb.Ping(ctx).Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}
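
// BenchmarkSetGoroutines issues 1000 concurrent SET commands per iteration
// to exercise the connection pool under goroutine contention.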
func BenchmarkSetGoroutines(b *testing.B) {
	ctx := context.Background()
	rdb := benchmarkRedisClient(ctx, 10)
	defer rdb.Close()

	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup

		for i := 0; i < 1000; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				err := rdb.Set(ctx, "hello", "world", 0).Err()
				if err != nil {
					panic(err)
				}
			}()
		}

		wg.Wait()
	}
}
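
// BenchmarkRedisGetNil measures GET on a missing key, where redis.Nil is the
// expected result.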
func BenchmarkRedisGetNil(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Get(ctx, "key").Err(); err != redis.Nil {
				b.Fatal(err)
			}
		}
	})
}
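
// setStringBenchmark parameterizes BenchmarkRedisSetString by pool size and
// value size.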
type setStringBenchmark struct {
	poolSize  int
	valueSize int
}

func (bm setStringBenchmark) String() string {
	return fmt.Sprintf("pool=%d value=%d", bm.poolSize, bm.valueSize)
}
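
// BenchmarkRedisSetString runs one sub-benchmark per pool size/value size
// combination, setting the same key with values from 64 bytes to 10 MiB.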
func BenchmarkRedisSetString(b *testing.B) {
	benchmarks := []setStringBenchmark{
		{10, 64},
		{10, 1024},
		{10, 64 * 1024},
		{10, 1024 * 1024},
		{10, 10 * 1024 * 1024},

		{100, 64},
		{100, 1024},
		{100, 64 * 1024},
		{100, 1024 * 1024},
		{100, 10 * 1024 * 1024},
	}
	for _, bm := range benchmarks {
		b.Run(bm.String(), func(b *testing.B) {
			ctx := context.Background()
			client := benchmarkRedisClient(ctx, bm.poolSize)
			defer client.Close()

			value := strings.Repeat("1", bm.valueSize)

			b.ResetTimer()

			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					err := client.Set(ctx, "key", value, 0).Err()
					if err != nil {
						b.Fatal(err)
					}
				}
			})
		})
	}
}
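
// BenchmarkRedisSetGetBytes measures a SET/GET round trip with a 10 KB value,
// verifying that the value read back matches the value written.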
func BenchmarkRedisSetGetBytes(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	value := bytes.Repeat([]byte{'1'}, 10000)

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Set(ctx, "key", value, 0).Err(); err != nil {
				b.Fatal(err)
			}

			got, err := client.Get(ctx, "key").Bytes()
			if err != nil {
				b.Fatal(err)
			}
			if !bytes.Equal(got, value) {
				b.Fatalf("got != value")
			}
		}
	})
}
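
// BenchmarkRedisMGet measures MGET of two pre-populated keys.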
func BenchmarkRedisMGet(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	if err := client.MSet(ctx, "key1", "hello1", "key2", "hello2").Err(); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.MGet(ctx, "key1", "key2").Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}
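
// BenchmarkSetExpire measures SET followed by EXPIRE as two separate commands;
// compare with BenchmarkPipeline, which batches the same pair into one round trip.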
func BenchmarkSetExpire(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Set(ctx, "key", "hello", 0).Err(); err != nil {
				b.Fatal(err)
			}
			if err := client.Expire(ctx, "key", time.Second).Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}
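
// BenchmarkPipeline measures SET and EXPIRE batched into a single pipelined
// round trip.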
func BenchmarkPipeline(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
				pipe.Set(ctx, "key", "hello", 0)
				pipe.Expire(ctx, "key", time.Second)
				return nil
			})
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
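
// BenchmarkZAdd measures ZADD of a single member into a sorted set.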
func BenchmarkZAdd(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.ZAdd(ctx, "key", &redis.Z{
				Score:  float64(1),
				Member: "hello",
			}).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
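
// BenchmarkXRead measures a blocking XREAD across 16 streams, with one XADD
// per iteration so the read always has an entry to return.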
func BenchmarkXRead(b *testing.B) {
	ctx := context.Background()
	client := benchmarkRedisClient(ctx, 10)
	defer client.Close()

	args := redis.XAddArgs{
		Stream: "1",
		ID:     "*",
		Values: map[string]string{"uno": "dos"},
	}

	lenStreams := 16
	streams := make([]string, 0, lenStreams)
	for i := 0; i < lenStreams; i++ {
		streams = append(streams, strconv.Itoa(i))
	}
	for i := 0; i < lenStreams; i++ {
		streams = append(streams, "0")
	}

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			client.XAdd(ctx, &args)

			err := client.XRead(ctx, &redis.XReadArgs{
				Streams: streams,
				Count:   1,
				Block:   time.Second,
			}).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
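
// clientSink keeps the result of WithContext alive so the compiler cannot
// optimize the call away in BenchmarkWithContext.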
var clientSink *redis.Client

func BenchmarkWithContext(b *testing.B) {
	ctx := context.Background()
	rdb := benchmarkRedisClient(ctx, 10)
	defer rdb.Close()

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		clientSink = rdb.WithContext(ctx)
	}
}
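
// ringSink serves the same purpose for BenchmarkRingWithContext.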
var ringSink *redis.Ring

func BenchmarkRingWithContext(b *testing.B) {
	ctx := context.Background()
	rdb := redis.NewRing(&redis.RingOptions{})
	defer rdb.Close()

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		ringSink = rdb.WithContext(ctx)
	}
}

//------------------------------------------------------------------------------
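
// newClusterScenario describes a six-node test cluster on ports 8220-8225.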
func newClusterScenario() *clusterScenario {
	return &clusterScenario{
		ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
		nodeIDs:   make([]string, 6),
		processes: make(map[string]*redisProcess, 6),
		clients:   make(map[string]*redis.Client, 6),
	}
}
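
// BenchmarkClusterPing measures PING against a locally started cluster; it is
// skipped in -short mode.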
func BenchmarkClusterPing(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	ctx := context.Background()
	cluster := newClusterScenario()
	if err := startCluster(ctx, cluster); err != nil {
		b.Fatal(err)
	}
	defer cluster.Close()

	client := cluster.newClusterClient(ctx, redisClusterOptions())
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.Ping(ctx).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
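
// BenchmarkClusterDoInt measures a raw SET issued through Do with integer
// arguments.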
func BenchmarkClusterDoInt(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	ctx := context.Background()
	cluster := newClusterScenario()
	if err := startCluster(ctx, cluster); err != nil {
		b.Fatal(err)
	}
	defer cluster.Close()

	client := cluster.newClusterClient(ctx, redisClusterOptions())
	defer client.Close()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.Do(ctx, "SET", 10, 10).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
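
// BenchmarkClusterSetString measures SET of a 10 KB string value against the
// cluster.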
func BenchmarkClusterSetString(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	ctx := context.Background()
	cluster := newClusterScenario()
	if err := startCluster(ctx, cluster); err != nil {
		b.Fatal(err)
	}
	defer cluster.Close()

	client := cluster.newClusterClient(ctx, redisClusterOptions())
	defer client.Close()

	value := string(bytes.Repeat([]byte{'1'}, 10000))

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := client.Set(ctx, "key", value, 0).Err()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
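
// clusterSink keeps the result of WithContext alive so the compiler cannot
// optimize the call away in BenchmarkClusterWithContext.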
var clusterSink *redis.ClusterClient

func BenchmarkClusterWithContext(b *testing.B) {
	ctx := context.Background()
	rdb := redis.NewClusterClient(&redis.ClusterOptions{})
	defer rdb.Close()

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		clusterSink = rdb.WithContext(ctx)
	}
}