redis/bench_test.go

322 lines
6.2 KiB
Go
Raw Normal View History

2016-03-10 12:10:47 +03:00
package redis_test
import (
"bytes"
2019-05-31 17:03:20 +03:00
"context"
2018-08-10 13:55:57 +03:00
"fmt"
"strings"
2016-03-10 12:10:47 +03:00
"testing"
"time"
2020-03-11 17:29:16 +03:00
"github.com/go-redis/redis/v8"
2016-03-10 12:10:47 +03:00
)
2020-03-11 17:26:42 +03:00
func benchmarkRedisClient(ctx context.Context, poolSize int) *redis.Client {
2016-03-10 12:10:47 +03:00
client := redis.NewClient(&redis.Options{
Addr: ":6379",
DialTimeout: time.Second,
ReadTimeout: time.Second,
WriteTimeout: time.Second,
PoolSize: poolSize,
})
2020-03-11 17:26:42 +03:00
if err := client.FlushDB(ctx).Err(); err != nil {
2016-03-10 12:10:47 +03:00
panic(err)
}
return client
}
func BenchmarkRedisPing(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
if err := client.Ping(ctx).Err(); err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
}
})
}
func BenchmarkRedisGetNil(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
if err := client.Get(ctx, "key").Err(); err != redis.Nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
}
})
}
2018-08-10 13:55:57 +03:00
// setStringBenchmark describes one SET benchmark case: the connection pool
// size to use and the size in bytes of the value written.
type setStringBenchmark struct {
	poolSize  int
	valueSize int
}

// String renders the case as a sub-benchmark name, e.g. "pool=10 value=64".
func (s setStringBenchmark) String() string {
	return fmt.Sprintf("pool=%d value=%d", s.poolSize, s.valueSize)
}
2018-08-10 13:55:57 +03:00
func BenchmarkRedisSetString(b *testing.B) {
benchmarks := []setStringBenchmark{
{10, 64},
{10, 1024},
{10, 64 * 1024},
{10, 1024 * 1024},
2018-08-15 09:10:53 +03:00
{10, 10 * 1024 * 1024},
2018-08-10 13:55:57 +03:00
{100, 64},
{100, 1024},
{100, 64 * 1024},
{100, 1024 * 1024},
2018-08-15 09:10:53 +03:00
{100, 10 * 1024 * 1024},
2018-08-10 13:55:57 +03:00
}
for _, bm := range benchmarks {
b.Run(bm.String(), func(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, bm.poolSize)
2018-08-10 13:55:57 +03:00
defer client.Close()
value := strings.Repeat("1", bm.valueSize)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
err := client.Set(ctx, "key", value, 0).Err()
2018-08-10 13:55:57 +03:00
if err != nil {
b.Fatal(err)
}
}
})
})
}
2016-03-10 12:10:47 +03:00
}
func BenchmarkRedisSetGetBytes(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
value := bytes.Repeat([]byte{'1'}, 10000)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
if err := client.Set(ctx, "key", value, 0).Err(); err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
2020-03-11 17:26:42 +03:00
got, err := client.Get(ctx, "key").Bytes()
2016-03-10 12:10:47 +03:00
if err != nil {
b.Fatal(err)
}
if !bytes.Equal(got, value) {
b.Fatalf("got != value")
}
}
})
}
func BenchmarkRedisMGet(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
2020-03-11 17:26:42 +03:00
if err := client.MSet(ctx, "key1", "hello1", "key2", "hello2").Err(); err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
if err := client.MGet(ctx, "key1", "key2").Err(); err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
}
})
}
func BenchmarkSetExpire(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
if err := client.Set(ctx, "key", "hello", 0).Err(); err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
2020-03-11 17:26:42 +03:00
if err := client.Expire(ctx, "key", time.Second).Err(); err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
}
})
}
func BenchmarkPipeline(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
_, err := client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
pipe.Set(ctx, "key", "hello", 0)
pipe.Expire(ctx, "key", time.Second)
2016-03-10 12:10:47 +03:00
return nil
})
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkZAdd(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
client := benchmarkRedisClient(ctx, 10)
2016-03-10 12:10:47 +03:00
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
err := client.ZAdd(ctx, "key", &redis.Z{
2018-03-07 12:56:24 +03:00
Score: float64(1),
Member: "hello",
}).Err()
if err != nil {
2016-03-10 12:10:47 +03:00
b.Fatal(err)
}
}
})
}
2019-05-31 17:03:20 +03:00
var clientSink *redis.Client
func BenchmarkWithContext(b *testing.B) {
ctx := context.Background()
2020-03-11 17:26:42 +03:00
rdb := benchmarkRedisClient(ctx, 10)
defer rdb.Close()
2019-05-31 17:03:20 +03:00
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
clientSink = rdb.WithContext(ctx)
}
}
var ringSink *redis.Ring
func BenchmarkRingWithContext(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
2019-05-31 17:03:20 +03:00
rdb := redis.NewRing(&redis.RingOptions{})
defer rdb.Close()
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
ringSink = rdb.WithContext(ctx)
}
}
//------------------------------------------------------------------------------
func newClusterScenario() *clusterScenario {
return &clusterScenario{
ports: []string{"8220", "8221", "8222", "8223", "8224", "8225"},
2019-07-25 13:53:00 +03:00
nodeIDs: make([]string, 6),
2019-05-31 17:03:20 +03:00
processes: make(map[string]*redisProcess, 6),
clients: make(map[string]*redis.Client, 6),
}
}
func BenchmarkClusterPing(b *testing.B) {
if testing.Short() {
b.Skip("skipping in short mode")
}
2020-03-11 17:26:42 +03:00
ctx := context.Background()
2019-05-31 17:03:20 +03:00
cluster := newClusterScenario()
2020-03-11 17:26:42 +03:00
if err := startCluster(ctx, cluster); err != nil {
2019-05-31 17:03:20 +03:00
b.Fatal(err)
}
2020-09-09 17:39:13 +03:00
defer cluster.Close()
2019-05-31 17:03:20 +03:00
2020-03-11 17:26:42 +03:00
client := cluster.newClusterClient(ctx, redisClusterOptions())
2019-05-31 17:03:20 +03:00
defer client.Close()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
err := client.Ping(ctx).Err()
2019-05-31 17:03:20 +03:00
if err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkClusterSetString(b *testing.B) {
if testing.Short() {
b.Skip("skipping in short mode")
}
2020-03-11 17:26:42 +03:00
ctx := context.Background()
2019-05-31 17:03:20 +03:00
cluster := newClusterScenario()
2020-03-11 17:26:42 +03:00
if err := startCluster(ctx, cluster); err != nil {
2019-05-31 17:03:20 +03:00
b.Fatal(err)
}
2020-09-09 17:39:13 +03:00
defer cluster.Close()
2019-05-31 17:03:20 +03:00
2020-03-11 17:26:42 +03:00
client := cluster.newClusterClient(ctx, redisClusterOptions())
2019-05-31 17:03:20 +03:00
defer client.Close()
value := string(bytes.Repeat([]byte{'1'}, 10000))
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
2020-03-11 17:26:42 +03:00
err := client.Set(ctx, "key", value, 0).Err()
2019-05-31 17:03:20 +03:00
if err != nil {
b.Fatal(err)
}
}
})
}
var clusterSink *redis.ClusterClient
func BenchmarkClusterWithContext(b *testing.B) {
2020-03-11 17:26:42 +03:00
ctx := context.Background()
2019-05-31 17:03:20 +03:00
rdb := redis.NewClusterClient(&redis.ClusterOptions{})
defer rdb.Close()
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
clusterSink = rdb.WithContext(ctx)
}
}