diff --git a/.github/wordlist.txt b/.github/wordlist.txt index 52fdc1bc..dceddff4 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -57,4 +57,5 @@ url variadic RedisStack RedisGears -RedisTimeseries \ No newline at end of file +RedisTimeseries +RediSearch diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index a139f5da..5210ccfa 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -23,4 +23,4 @@ jobs: steps: - uses: actions/checkout@v4 - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index f739a542..62e38997 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -8,7 +8,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: Check Spelling - uses: rojopolis/spellcheck-github-actions@0.36.0 + uses: rojopolis/spellcheck-github-actions@0.38.0 with: config_path: .github/spellcheck-settings.yml task_name: Markdown diff --git a/Makefile b/Makefile index ea5321f2..d8d00759 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ build: testdata/redis: mkdir -p $@ - wget -qO- https://download.redis.io/releases/redis-7.2.1.tar.gz | tar xvz --strip-components=1 -C $@ + wget -qO- https://download.redis.io/releases/redis-7.4-rc2.tar.gz | tar xvz --strip-components=1 -C $@ testdata/redis/src/redis-server: testdata/redis cd $< && make all diff --git a/command.go b/command.go index 93f57bdb..c739f834 100644 --- a/command.go +++ b/command.go @@ -573,6 +573,10 @@ func (cmd *StatusCmd) Result() (string, error) { return cmd.val, cmd.err } +func (cmd *StatusCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + func (cmd *StatusCmd) String() string { return cmdString(cmd, cmd.val) } @@ -3783,6 +3787,65 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { return nil } +// ----------------------------------------------------------------------- +// MapStringInterfaceCmd represents a command that returns a map of strings to interface{}. 
+type MapMapStringInterfaceCmd struct { + baseCmd + val map[string]interface{} +} + +func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd { + return &MapMapStringInterfaceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapMapStringInterfaceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} { + return cmd.val +} + +func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + data := make(map[string]interface{}, n/2) + for i := 0; i < n; i += 2 { + _, err := rd.ReadArrayLen() + if err != nil { + cmd.err = err + } + key, err := rd.ReadString() + if err != nil { + cmd.err = err + } + value, err := rd.ReadString() + if err != nil { + cmd.err = err + } + data[key] = value + } + + cmd.val = data + return nil +} + //----------------------------------------------------------------------- type MapStringInterfaceSliceCmd struct { @@ -4997,6 +5060,7 @@ type ClientInfo struct { PSub int // number of pattern matching subscriptions SSub int // redis version 7.0.3, number of shard channel subscriptions Multi int // number of commands in a MULTI/EXEC context + Watch int // redis version 7.4 RC1, number of keys this client is currently watching. QueryBuf int // qbuf, query buffer length (0 means no query pending) QueryBufFree int // qbuf-free, free space of the query buffer (0 means the buffer is full) ArgvMem int // incomplete arguments for the next command (already extracted from query buffer) @@ -5149,6 +5213,8 @@ func parseClientInfo(txt string) (info *ClientInfo, err error) { info.SSub, err = strconv.Atoi(val) case "multi": info.Multi, err = strconv.Atoi(val) + case "watch": + info.Watch, err = strconv.Atoi(val) case "qbuf": info.QueryBuf, err = strconv.Atoi(val) case "qbuf-free": diff --git a/commands.go b/commands.go index db595944..034daa23 100644 --- a/commands.go +++ b/commands.go @@ -220,6 +220,7 @@ type Cmdable interface { ProbabilisticCmdable PubSubCmdable ScriptingFunctionsCmdable + SearchCmdable SetCmdable SortedSetCmdable StringCmdable diff --git a/commands_test.go b/commands_test.go index 10e94e26..19832610 100644 --- a/commands_test.go +++ b/commands_test.go @@ -193,6 +193,40 @@ var _ = Describe("Commands", func() { Expect(r.Val()).To(Equal(int64(0))) }) + It("should ClientKillByFilter with MAXAGE", Label("NonRedisEnterprise"), func() { + var s []string + started := make(chan bool) + done := make(chan bool) + + go func() { + defer GinkgoRecover() + + started <- true + blpop := client.BLPop(ctx, 0, "list") + Expect(blpop.Val()).To(Equal(s)) + done <- true + }() + <-started + + select { + case <-done: + Fail("BLPOP is not blocked.") + case <-time.After(2 * time.Second): + // ok + } + + killed := client.ClientKillByFilter(ctx, "MAXAGE", "1") + Expect(killed.Err()).NotTo(HaveOccurred()) + Expect(killed.Val()).To(SatisfyAny(Equal(int64(2)), Equal(int64(3)))) + + select { + case <-done: + // ok + case <-time.After(time.Second): + Fail("BLPOP is still blocked.") + } + }) + It("should ClientID", func() { err := client.ClientID(ctx).Err() Expect(err).NotTo(HaveOccurred()) @@ -1099,6 +1133,26 @@ var _ = Describe("Commands", func() { keys, cursor, err := 
client.HScan(ctx, "myhash", 0, "", 0).Result() Expect(err).NotTo(HaveOccurred()) + // If we don't get at least two items back, it's really strange. + Expect(cursor).To(BeNumerically(">=", 2)) + Expect(len(keys)).To(BeNumerically(">=", 2)) + Expect(keys[0]).To(HavePrefix("key")) + Expect(keys[1]).To(Equal("hello")) + }) + + It("should HScan without values", Label("NonRedisEnterprise"), func() { + for i := 0; i < 1000; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + keys, cursor, err := client.HScanNoValues(ctx, "myhash", 0, "", 0).Result() + Expect(err).NotTo(HaveOccurred()) + // If we don't get at least two items back, it's really strange. + Expect(cursor).To(BeNumerically(">=", 2)) + Expect(len(keys)).To(BeNumerically(">=", 2)) + Expect(keys[0]).To(HavePrefix("key")) + Expect(keys[1]).To(HavePrefix("key")) Expect(keys).NotTo(BeEmpty()) Expect(cursor).NotTo(BeZero()) }) @@ -2429,6 +2483,166 @@ var _ = Describe("Commands", func() { Equal([]redis.KeyValue{{Key: "key2", Value: "hello2"}}), )) }) + + It("should HExpire", Label("hash-expiration", "NonRedisEnterprise"), func() { + res, err := client.HExpire(ctx, "no_such_key", 10*time.Second, "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(res).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err = client.HExpire(ctx, "myhash", 10*time.Second, "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, 1, -2})) + }) + + It("should HPExpire", Label("hash-expiration", "NonRedisEnterprise"), func() { + res, err := client.HPExpire(ctx, "no_such_key", 10*time.Second, "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(res).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err = client.HPExpire(ctx, "myhash", 10*time.Second, "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, 1, -2})) + }) + + It("should HExpireAt", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HExpireAt(ctx, "no_such_key", time.Now().Add(10*time.Second), "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err := client.HExpireAt(ctx, "myhash", time.Now().Add(10*time.Second), "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, 1, -2})) + }) + + It("should HPExpireAt", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HPExpireAt(ctx, "no_such_key", time.Now().Add(10*time.Second), "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err := client.HPExpireAt(ctx, "myhash", time.Now().Add(10*time.Second), "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + 
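// Illustrative usage sketch (not part of the patch): the new HScanNoValues
// command exercised in the test above returns only field names, without the
// interleaved values that a plain HSCAN reply carries. A minimal sketch,
// assuming a *redis.Client named rdb and the usual "context" and
// "github.com/redis/go-redis/v9" imports.
func hashFieldNames(ctx context.Context, rdb *redis.Client, key string) ([]string, error) {
	iter := rdb.HScanNoValues(ctx, key, 0, "", 100).Iterator()
	var fields []string
	for iter.Next(ctx) {
		fields = append(fields, iter.Val()) // field names only
	}
	return fields, iter.Err()
}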
Expect(res).To(Equal([]int64{1, 1, -2})) + }) + + It("should HPersist", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HPersist(ctx, "no_such_key", "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err := client.HPersist(ctx, "myhash", "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{-1, -1, -2})) + + res, err = client.HExpire(ctx, "myhash", 10*time.Second, "key1", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, -2})) + + res, err = client.HPersist(ctx, "myhash", "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, -1, -2})) + }) + + It("should HExpireTime", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HExpireTime(ctx, "no_such_key", "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err := client.HExpire(ctx, "myhash", 10*time.Second, "key1", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, -2})) + + res, err = client.HExpireTime(ctx, "myhash", "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res[0]).To(BeNumerically("~", time.Now().Add(10*time.Second).Unix(), 1)) + }) + + It("should HPExpireTime", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HPExpireTime(ctx, "no_such_key", "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + expireAt := time.Now().Add(10 * time.Second) + res, err := client.HPExpireAt(ctx, "myhash", expireAt, "key1", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, -2})) + + res, err = client.HPExpireTime(ctx, "myhash", "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo([]int64{expireAt.UnixMilli(), -1, -2})) + }) + + It("should HTTL", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HTTL(ctx, "no_such_key", "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + + for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err := client.HExpire(ctx, "myhash", 10*time.Second, "key1", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, -2})) + + res, err = client.HTTL(ctx, "myhash", "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{10, -1, -2})) + }) + + It("should HPTTL", Label("hash-expiration", "NonRedisEnterprise"), func() { + resEmpty, err := client.HPTTL(ctx, "no_such_key", "field1", "field2", "field3").Result() + Expect(err).To(BeNil()) + Expect(resEmpty).To(BeEquivalentTo([]int64{-2, -2, -2})) + 
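// Illustrative usage sketch (not part of the patch): a typical hash-field TTL
// round trip with the commands covered by these tests. Per-field status codes
// follow the expectations above: 1 on success, -1 when no TTL is set, -2 when
// the field does not exist. A minimal sketch assuming a *redis.Client named
// rdb and the "context", "time", and "github.com/redis/go-redis/v9" imports.
func fieldTTLRoundTrip(ctx context.Context, rdb *redis.Client) error {
	if err := rdb.HSet(ctx, "session", "token", "abc123").Err(); err != nil {
		return err
	}
	// Give the single field a 10s TTL without touching the rest of the hash.
	if _, err := rdb.HExpire(ctx, "session", 10*time.Second, "token").Result(); err != nil {
		return err
	}
	ttls, err := rdb.HTTL(ctx, "session", "token").Result() // e.g. [10]
	if err != nil {
		return err
	}
	_ = ttls
	// Drop the expiration again; the field becomes persistent.
	return rdb.HPersist(ctx, "session", "token").Err()
}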
+ for i := 0; i < 100; i++ { + sadd := client.HSet(ctx, "myhash", fmt.Sprintf("key%d", i), "hello") + Expect(sadd.Err()).NotTo(HaveOccurred()) + } + + res, err := client.HExpire(ctx, "myhash", 10*time.Second, "key1", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]int64{1, -2})) + + res, err = client.HPTTL(ctx, "myhash", "key1", "key2", "key200").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res[0]).To(BeNumerically("~", 10*time.Second.Milliseconds(), 1)) + }) }) Describe("hyperloglog", func() { @@ -5685,6 +5899,78 @@ var _ = Describe("Commands", func() { Expect(err).To(Equal(redis.Nil)) }) + It("should XRead LastEntry", Label("NonRedisEnterprise"), func() { + res, err := client.XRead(ctx, &redis.XReadArgs{ + Streams: []string{"stream"}, + Count: 2, // we expect 1 message + ID: "+", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]redis.XStream{ + { + Stream: "stream", + Messages: []redis.XMessage{ + {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}}, + }, + }, + })) + }) + + It("should XRead LastEntry from two streams", Label("NonRedisEnterprise"), func() { + res, err := client.XRead(ctx, &redis.XReadArgs{ + Streams: []string{"stream", "stream"}, + ID: "+", + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(Equal([]redis.XStream{ + { + Stream: "stream", + Messages: []redis.XMessage{ + {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}}, + }, + }, + { + Stream: "stream", + Messages: []redis.XMessage{ + {ID: "3-0", Values: map[string]interface{}{"tres": "troix"}}, + }, + }, + })) + }) + + It("should XRead LastEntry blocks", Label("NonRedisEnterprise"), func() { + start := time.Now() + go func() { + defer GinkgoRecover() + + time.Sleep(100 * time.Millisecond) + id, err := client.XAdd(ctx, &redis.XAddArgs{ + Stream: "empty", + ID: "4-0", + Values: map[string]interface{}{"quatro": "quatre"}, + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(id).To(Equal("4-0")) + }() + + res, err := client.XRead(ctx, &redis.XReadArgs{ + Streams: []string{"empty"}, + Block: 500 * time.Millisecond, + ID: "+", + }).Result() + Expect(err).NotTo(HaveOccurred()) + // Ensure that the XRead call with LastEntry option blocked for at least 100ms. 
+ Expect(time.Since(start)).To(BeNumerically(">=", 100*time.Millisecond)) + Expect(res).To(Equal([]redis.XStream{ + { + Stream: "empty", + Messages: []redis.XMessage{ + {ID: "4-0", Values: map[string]interface{}{"quatro": "quatre"}}, + }, + }, + })) + }) + Describe("group", func() { BeforeEach(func() { err := client.XGroupCreate(ctx, "stream", "group", "0").Err() diff --git a/example/otel/go.mod b/example/otel/go.mod index 2beb75db..fea4e72a 100644 --- a/example/otel/go.mod +++ b/example/otel/go.mod @@ -34,8 +34,8 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect diff --git a/example/otel/go.sum b/example/otel/go.sum index 4a481d6e..5fb4c458 100644 --- a/example/otel/go.sum +++ b/example/otel/go.sum @@ -46,10 +46,10 @@ go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/hash_commands.go b/hash_commands.go index 2c62a75a..dcffdcdd 100644 --- a/hash_commands.go +++ b/hash_commands.go @@ -1,6 +1,9 @@ package redis -import "context" +import ( + "context" + "time" +) type HashCmdable interface { HDel(ctx context.Context, key string, fields ...string) *IntCmd @@ -16,9 +19,23 @@ type HashCmdable interface { HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd HVals(ctx context.Context, key string) *StringSliceCmd HRandField(ctx context.Context, key string, count int) *StringSliceCmd HRandFieldWithValues(ctx context.Context, key string, count int) *KeyValueSliceCmd + HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd + HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) 
*IntSliceCmd + HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd + HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd + HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd + HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd + HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd + HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd + HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd + HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd + HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd } func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { @@ -172,3 +189,262 @@ func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match str _ = c(ctx, cmd) return cmd } + +func (c cmdable) HScanNoValues(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + args = append(args, "novalues") + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +type HExpireArgs struct { + NX bool + XX bool + GT bool + LT bool +} + +// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. +// For more information - https://redis.io/commands/hexpire/ +func (c cmdable) HExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. +// The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. 
+// For more information - https://redis.io/commands/hexpire/ +func (c cmdable) HExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRE", key, formatSec(ctx, expiration)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpire - Sets the expiration time for specified fields in a hash in milliseconds. +// Similar to HExpire, it accepts a key, an expiration duration in milliseconds, a struct with expiration condition flags, and a list of fields. +// The command modifies the standard time.Duration to milliseconds for the Redis command. +// For more information - https://redis.io/commands/hpexpire/ +func (c cmdable) HPExpire(ctx context.Context, key string, expiration time.Duration, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HPExpireWithArgs(ctx context.Context, key string, expiration time.Duration, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRE", key, formatMs(ctx, expiration)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in seconds. +// Takes a key, a UNIX timestamp, a struct of conditional flags, and a list of fields. +// The command sets absolute expiration times based on the UNIX timestamp provided. +// For more information - https://redis.io/commands/hexpireat/ +func (c cmdable) HExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd { + + args := []interface{}{"HEXPIREAT", key, tm.Unix(), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) 
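// Illustrative usage sketch (not part of the patch): HExpireArgs gates the
// TTL update with at most one of NX/XX/GT/LT; the code above appends only a
// single flag and notes that combining them causes an error. A minimal sketch
// assuming a *redis.Client named rdb and the "context", "time", and
// "github.com/redis/go-redis/v9" imports.
func setFieldTTLIfUnset(ctx context.Context, rdb *redis.Client, key, field string) ([]int64, error) {
	// NX: only set the TTL when the field has no expiration yet.
	return rdb.HExpireWithArgs(ctx, key, time.Minute, redis.HExpireArgs{NX: true}, field).Result()
}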
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIREAT", key, tm.Unix()} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPExpireAt - Sets the expiration time for specified fields in a hash to a UNIX timestamp in milliseconds. +// Similar to HExpireAt but for timestamps in milliseconds. It accepts the same parameters and adjusts the UNIX time to milliseconds. +// For more information - https://redis.io/commands/hpexpireat/ +func (c cmdable) HPExpireAt(ctx context.Context, key string, tm time.Time, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond), "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HPExpireAtWithArgs(ctx context.Context, key string, tm time.Time, expirationArgs HExpireArgs, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIREAT", key, tm.UnixNano() / int64(time.Millisecond)} + + // only if one argument is true, we can add it to the args + // if more than one argument is true, it will cause an error + if expirationArgs.NX { + args = append(args, "NX") + } else if expirationArgs.XX { + args = append(args, "XX") + } else if expirationArgs.GT { + args = append(args, "GT") + } else if expirationArgs.LT { + args = append(args, "LT") + } + + args = append(args, "FIELDS", len(fields)) + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPersist - Removes the expiration time from specified fields in a hash. +// Accepts a key and the fields themselves. +// This command ensures that each field specified will have its expiration removed if present. +// For more information - https://redis.io/commands/hpersist/ +func (c cmdable) HPersist(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPERSIST", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in seconds. +// Requires a key and the fields themselves to fetch their expiration timestamps. +// This command returns the expiration times for each field or error/status codes for each field as specified. +// For more information - https://redis.io/commands/hexpiretime/ +func (c cmdable) HExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HEXPIRETIME", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) 
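// Illustrative usage sketch (not part of the patch): setting an absolute
// deadline with HExpireAt and reading the stored expiry back with
// HExpireTime. A minimal sketch assuming a *redis.Client named rdb and the
// "context", "time", and "github.com/redis/go-redis/v9" imports.
func expireFieldAt(ctx context.Context, rdb *redis.Client, key, field string, deadline time.Time) (int64, error) {
	if _, err := rdb.HExpireAt(ctx, key, deadline, field).Result(); err != nil {
		return 0, err
	}
	// Returns the UNIX timestamp in seconds, or the -1/-2 status codes seen
	// in the tests earlier in this patch.
	times, err := rdb.HExpireTime(ctx, key, field).Result()
	if err != nil || len(times) == 0 {
		return 0, err
	}
	return times[0], nil
}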
+ _ = c(ctx, cmd) + return cmd +} + +// HPExpireTime - Retrieves the expiration time for specified fields in a hash as a UNIX timestamp in milliseconds. +// Similar to HExpireTime, adjusted for timestamps in milliseconds. It requires the same parameters. +// Provides the expiration timestamp for each field in milliseconds. +// For more information - https://redis.io/commands/hexpiretime/ +func (c cmdable) HPExpireTime(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPEXPIRETIME", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HTTL - Retrieves the remaining time to live for specified fields in a hash in seconds. +// Requires a key and the fields themselves. It returns the TTL for each specified field. +// This command fetches the TTL in seconds for each field or returns error/status codes as appropriate. +// For more information - https://redis.io/commands/httl/ +func (c cmdable) HTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HTTL", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HPTTL - Retrieves the remaining time to live for specified fields in a hash in milliseconds. +// Similar to HTTL, but returns the TTL in milliseconds. It requires a key and the specified fields. +// This command provides the TTL in milliseconds for each field or returns error/status codes as needed. +// For more information - https://redis.io/commands/hpttl/ +func (c cmdable) HPTTL(ctx context.Context, key string, fields ...string) *IntSliceCmd { + args := []interface{}{"HPTTL", key, "FIELDS", len(fields)} + + for _, field := range fields { + args = append(args, field) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/internal/pool/conn_check.go b/internal/pool/conn_check.go index 83190d39..07c261c2 100644 --- a/internal/pool/conn_check.go +++ b/internal/pool/conn_check.go @@ -3,6 +3,7 @@ package pool import ( + "crypto/tls" "errors" "io" "net" @@ -16,6 +17,10 @@ func connCheck(conn net.Conn) error { // Reset previous timeout. _ = conn.SetDeadline(time.Time{}) + // Check if tls.Conn. 
+ if c, ok := conn.(*tls.Conn); ok { + conn = c.NetConn() + } sysConn, ok := conn.(syscall.Conn) if !ok { return nil diff --git a/internal/pool/conn_check_test.go b/internal/pool/conn_check_test.go index 2ade8a0b..21499333 100644 --- a/internal/pool/conn_check_test.go +++ b/internal/pool/conn_check_test.go @@ -3,6 +3,7 @@ package pool import ( + "crypto/tls" "net" "net/http/httptest" "time" @@ -14,12 +15,17 @@ import ( var _ = Describe("tests conn_check with real conns", func() { var ts *httptest.Server var conn net.Conn + var tlsConn *tls.Conn var err error BeforeEach(func() { ts = httptest.NewServer(nil) conn, err = net.DialTimeout(ts.Listener.Addr().Network(), ts.Listener.Addr().String(), time.Second) Expect(err).NotTo(HaveOccurred()) + tlsTestServer := httptest.NewUnstartedServer(nil) + tlsTestServer.StartTLS() + tlsConn, err = tls.DialWithDialer(&net.Dialer{Timeout: time.Second}, tlsTestServer.Listener.Addr().Network(), tlsTestServer.Listener.Addr().String(), &tls.Config{InsecureSkipVerify: true}) + Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { @@ -33,11 +39,23 @@ var _ = Describe("tests conn_check with real conns", func() { Expect(connCheck(conn)).To(HaveOccurred()) }) + It("good tls conn check", func() { + Expect(connCheck(tlsConn)).NotTo(HaveOccurred()) + + Expect(tlsConn.Close()).NotTo(HaveOccurred()) + Expect(connCheck(tlsConn)).To(HaveOccurred()) + }) + It("bad conn check", func() { Expect(conn.Close()).NotTo(HaveOccurred()) Expect(connCheck(conn)).To(HaveOccurred()) }) + It("bad tls conn check", func() { + Expect(tlsConn.Close()).NotTo(HaveOccurred()) + Expect(connCheck(tlsConn)).To(HaveOccurred()) + }) + It("check conn deadline", func() { Expect(conn.SetDeadline(time.Now())).NotTo(HaveOccurred()) time.Sleep(time.Millisecond * 10) diff --git a/internal/util.go b/internal/util.go index 235a91af..cc1bff24 100644 --- a/internal/util.go +++ b/internal/util.go @@ -3,6 +3,7 @@ package internal import ( "context" "net" + "strconv" "strings" "time" @@ -81,3 +82,47 @@ func GetAddr(addr string) string { } return net.JoinHostPort(addr[:ind], addr[ind+1:]) } + +func ToInteger(val interface{}) int { + switch v := val.(type) { + case int: + return v + case int64: + return int(v) + case string: + i, _ := strconv.Atoi(v) + return i + default: + return 0 + } +} + +func ToFloat(val interface{}) float64 { + switch v := val.(type) { + case float64: + return v + case string: + f, _ := strconv.ParseFloat(v, 64) + return f + default: + return 0.0 + } +} + +func ToString(val interface{}) string { + if str, ok := val.(string); ok { + return str + } + return "" +} + +func ToStringSlice(val interface{}) []string { + if arr, ok := val.([]interface{}); ok { + result := make([]string, len(arr)) + for i, v := range arr { + result[i] = ToString(v) + } + return result + } + return nil +} diff --git a/iterator_test.go b/iterator_test.go index 95cfcfc1..3fa50a7f 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -96,6 +96,22 @@ var _ = Describe("ScanIterator", func() { Expect(vals).To(HaveLen(71 * 2)) Expect(vals).To(ContainElement("K01")) Expect(vals).To(ContainElement("K71")) + Expect(vals).To(ContainElement("x")) + }) + + It("should hscan without values across multiple pages", Label("NonRedisEnterprise"), func() { + Expect(hashSeed(71)).NotTo(HaveOccurred()) + + var vals []string + iter := client.HScanNoValues(ctx, hashKey, 0, "", 10).Iterator() + for iter.Next(ctx) { + vals = append(vals, iter.Val()) + } + Expect(iter.Err()).NotTo(HaveOccurred()) + Expect(vals).To(HaveLen(71)) + 
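// Illustrative sketch (not part of the patch): the connCheck change earlier
// in this patch unwraps *tls.Conn before probing the socket, because
// *tls.Conn does not implement syscall.Conn itself; NetConn (Go 1.18+)
// exposes the connection handed to the TLS layer by the dialer. A standalone
// sketch of that unwrapping step, assuming the "crypto/tls" and "net" imports.
func rawNetConn(conn net.Conn) net.Conn {
	if c, ok := conn.(*tls.Conn); ok {
		return c.NetConn() // underlying connection set when the tls.Conn was created
	}
	return conn
}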
Expect(vals).To(ContainElement("K01")) + Expect(vals).To(ContainElement("K71")) + Expect(vals).NotTo(ContainElement("x")) }) It("should scan to page borders", func() { diff --git a/json_test.go b/json_test.go index 4e9718a4..d1ea2429 100644 --- a/json_test.go +++ b/json_test.go @@ -242,18 +242,18 @@ var _ = Describe("JSON Commands", Label("json"), func() { Expect(cmd.Val()).To(Equal("OK")) }) - It("should JSONGet", Label("json.get", "json"), func() { + It("should JSONGet", Label("json.get", "json", "NonRedisEnterprise"), func() { res, err := client.JSONSet(ctx, "get3", "$", `{"a": 1, "b": 2}`).Result() Expect(err).NotTo(HaveOccurred()) Expect(res).To(Equal("OK")) res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-"}).Result() Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(`[-{--"a":1,--"b":2-}]`)) + Expect(res).To(Equal(`{-"a":1,-"b":2}`)) res, err = client.JSONGetWithArgs(ctx, "get3", &redis.JSONGetArgs{Indent: "-", Newline: `~`, Space: `!`}).Result() Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(`[~-{~--"a":!1,~--"b":!2~-}~]`)) + Expect(res).To(Equal(`{~-"a":!1,~-"b":!2~}`)) }) It("should JSONMerge", Label("json.merge", "json"), func() { diff --git a/monitor_test.go b/monitor_test.go index 8617d336..96c33bf1 100644 --- a/monitor_test.go +++ b/monitor_test.go @@ -2,6 +2,7 @@ package redis_test import ( "context" + "os" "strings" "testing" "time" @@ -12,13 +13,18 @@ import ( "github.com/redis/go-redis/v9" ) +// This test is for manual use and is not part of the CI of Go-Redis. var _ = Describe("Monitor command", Label("monitor"), func() { ctx := context.TODO() var client *redis.Client BeforeEach(func() { + if os.Getenv("RUN_MONITOR_TEST") != "true" { + Skip("Skipping Monitor command test. Set RUN_MONITOR_TEST=true to run it.") + } client = redis.NewClient(&redis.Options{Addr: ":6379"}) Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) + }) AfterEach(func() { @@ -50,6 +56,10 @@ var _ = Describe("Monitor command", Label("monitor"), func() { }) func TestMonitorCommand(t *testing.T) { + if os.Getenv("RUN_MONITOR_TEST") != "true" { + t.Skip("Skipping Monitor command test. Set RUN_MONITOR_TEST=true to run it.") + } + ctx := context.TODO() client := redis.NewClient(&redis.Options{Addr: ":6379"}) if err := client.FlushDB(ctx).Err(); err != nil { diff --git a/osscluster.go b/osscluster.go index c45159c5..6157e07f 100644 --- a/osscluster.go +++ b/osscluster.go @@ -341,6 +341,8 @@ func (n *clusterNode) Close() error { return n.Client.Close() } +const maximumNodeLatency = 1 * time.Minute + func (n *clusterNode) updateLatency() { const numProbe = 10 var dur uint64 @@ -361,7 +363,7 @@ func (n *clusterNode) updateLatency() { if successes == 0 { // If none of the pings worked, set latency to some arbitrarily high value so this node gets // least priority. 
- latency = float64((1 * time.Minute) / time.Microsecond) + latency = float64((maximumNodeLatency) / time.Microsecond) } else { latency = float64(dur) / float64(successes) } @@ -735,20 +737,40 @@ func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { return c.nodes.Random() } - var node *clusterNode + var allNodesFailing = true + var ( + closestNonFailingNode *clusterNode + closestNode *clusterNode + minLatency time.Duration + ) + + // setting the max possible duration as zerovalue for minlatency + minLatency = time.Duration(math.MaxInt64) + for _, n := range nodes { - if n.Failing() { - continue + if closestNode == nil || n.Latency() < minLatency { + closestNode = n + minLatency = n.Latency() + if !n.Failing() { + closestNonFailingNode = n + allNodesFailing = false + } } - if node == nil || n.Latency() < node.Latency() { - node = n - } - } - if node != nil { - return node, nil } - // If all nodes are failing - return random node + // pick the healthly node with the lowest latency + if !allNodesFailing && closestNonFailingNode != nil { + return closestNonFailingNode, nil + } + + // if all nodes are failing, we will pick the temporarily failing node with lowest latency + if minLatency < maximumNodeLatency && closestNode != nil { + internal.Logger.Printf(context.TODO(), "redis: all nodes are marked as failed, picking the temporarily failing node with lowest latency") + return closestNode, nil + } + + // If all nodes are having the maximum latency(all pings are failing) - return a random node across the cluster + internal.Logger.Printf(context.TODO(), "redis: pings to all nodes are failing, picking a random node across the cluster") return c.nodes.Random() } @@ -916,10 +938,13 @@ func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { slot := c.cmdSlot(ctx, cmd) var node *clusterNode + var moved bool var ask bool var lastErr error for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { - if attempt > 0 { + // MOVED and ASK responses are not transient errors that require retry delay; they + // should be attempted immediately. + if attempt > 0 && !moved && !ask { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { return err } @@ -963,7 +988,6 @@ func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { continue } - var moved bool var addr string moved, ask, addr = isMovedError(lastErr) if moved || ask { diff --git a/osscluster_test.go b/osscluster_test.go index 3d2f8071..f7bd1683 100644 --- a/osscluster_test.go +++ b/osscluster_test.go @@ -653,6 +653,32 @@ var _ = Describe("ClusterClient", func() { Expect(client.Close()).NotTo(HaveOccurred()) }) + It("follows node redirection immediately", func() { + // Configure retry backoffs far in excess of the expected duration of redirection + opt := redisClusterOptions() + opt.MinRetryBackoff = 10 * time.Minute + opt.MaxRetryBackoff = 20 * time.Minute + client := cluster.newClusterClient(ctx, opt) + + Eventually(func() error { + return client.SwapNodes(ctx, "A") + }, 30*time.Second).ShouldNot(HaveOccurred()) + + // Note that this context sets a deadline more aggressive than the lowest possible bound + // of the retry backoff; this verifies that redirection completes immediately. 
+ redirCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + err := client.Set(redirCtx, "A", "VALUE", 0).Err() + Expect(err).NotTo(HaveOccurred()) + + v, err := client.Get(redirCtx, "A").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(v).To(Equal("VALUE")) + + Expect(client.Close()).NotTo(HaveOccurred()) + }) + It("calls fn for every master node", func() { for i := 0; i < 10; i++ { Expect(client.Set(ctx, strconv.Itoa(i), "", 0).Err()).NotTo(HaveOccurred()) diff --git a/pubsub.go b/pubsub.go index aea96241..72b18f49 100644 --- a/pubsub.go +++ b/pubsub.go @@ -84,7 +84,7 @@ func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, er } func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error { - return cn.WithWriter(context.Background(), c.opt.WriteTimeout, func(wr *proto.Writer) error { + return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { return writeCmd(wr, cmd) }) } diff --git a/search_commands.go b/search_commands.go new file mode 100644 index 00000000..8214a570 --- /dev/null +++ b/search_commands.go @@ -0,0 +1,2192 @@ +package redis + +import ( + "context" + "fmt" + "strconv" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" +) + +type SearchCmdable interface { + FT_List(ctx context.Context) *StringSliceCmd + FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd + FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd + FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd + FTAliasDel(ctx context.Context, alias string) *StatusCmd + FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd + FTAlter(ctx context.Context, index string, skipInitalScan bool, definition []interface{}) *StatusCmd + FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd + FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd + FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd + FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd + FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd + FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd + FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd + FTDictDump(ctx context.Context, dict string) *StringSliceCmd + FTDropIndex(ctx context.Context, index string) *StatusCmd + FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd + FTExplain(ctx context.Context, index string, query string) *StringCmd + FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd + FTInfo(ctx context.Context, index string) *FTInfoCmd + FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd + FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd + FTSearch(ctx context.Context, index string, query string) *FTSearchCmd + FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd + FTSynDump(ctx context.Context, index string) *FTSynDumpCmd + FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd + FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options 
*FTSynUpdateOptions, terms []interface{}) *StatusCmd + FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd +} + +type FTCreateOptions struct { + OnHash bool + OnJSON bool + Prefix []interface{} + Filter string + DefaultLanguage string + LanguageField string + Score float64 + ScoreField string + PayloadField string + MaxTextFields int + NoOffsets bool + Temporary int + NoHL bool + NoFields bool + NoFreqs bool + StopWords []interface{} + SkipInitalScan bool +} + +type FieldSchema struct { + FieldName string + As string + FieldType SearchFieldType + Sortable bool + UNF bool + NoStem bool + NoIndex bool + PhoneticMatcher string + Weight float64 + Seperator string + CaseSensitive bool + WithSuffixtrie bool + VectorArgs *FTVectorArgs + GeoShapeFieldType string +} + +type FTVectorArgs struct { + FlatOptions *FTFlatOptions + HNSWOptions *FTHNSWOptions +} + +type FTFlatOptions struct { + Type string + Dim int + DistanceMetric string + InitialCapacity int + BlockSize int +} + +type FTHNSWOptions struct { + Type string + Dim int + DistanceMetric string + InitialCapacity int + MaxEdgesPerNode int + MaxAllowedEdgesPerNode int + EFRunTime int + Epsilon float64 +} + +type FTDropIndexOptions struct { + DeleteDocs bool +} + +type SpellCheckTerms struct { + Include bool + Exclude bool + Dictionary string +} + +type FTExplainOptions struct { + Dialect string +} + +type FTSynUpdateOptions struct { + SkipInitialScan bool +} + +type SearchAggregator int + +const ( + SearchInvalid = SearchAggregator(iota) + SearchAvg + SearchSum + SearchMin + SearchMax + SearchCount + SearchCountDistinct + SearchCountDistinctish + SearchStdDev + SearchQuantile + SearchToList + SearchFirstValue + SearchRandomSample +) + +func (a SearchAggregator) String() string { + switch a { + case SearchInvalid: + return "" + case SearchAvg: + return "AVG" + case SearchSum: + return "SUM" + case SearchMin: + return "MIN" + case SearchMax: + return "MAX" + case SearchCount: + return "COUNT" + case SearchCountDistinct: + return "COUNT_DISTINCT" + case SearchCountDistinctish: + return "COUNT_DISTINCTISH" + case SearchStdDev: + return "STDDEV" + case SearchQuantile: + return "QUANTILE" + case SearchToList: + return "TOLIST" + case SearchFirstValue: + return "FIRST_VALUE" + case SearchRandomSample: + return "RANDOM_SAMPLE" + default: + return "" + } +} + +type SearchFieldType int + +const ( + SearchFieldTypeInvalid = SearchFieldType(iota) + SearchFieldTypeNumeric + SearchFieldTypeTag + SearchFieldTypeText + SearchFieldTypeGeo + SearchFieldTypeVector + SearchFieldTypeGeoShape +) + +func (t SearchFieldType) String() string { + switch t { + case SearchFieldTypeInvalid: + return "" + case SearchFieldTypeNumeric: + return "NUMERIC" + case SearchFieldTypeTag: + return "TAG" + case SearchFieldTypeText: + return "TEXT" + case SearchFieldTypeGeo: + return "GEO" + case SearchFieldTypeVector: + return "VECTOR" + case SearchFieldTypeGeoShape: + return "GEOSHAPE" + default: + return "TEXT" + } +} + +// Each AggregateReducer have different args. +// Please follow https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers for more information. 
+type FTAggregateReducer struct { + Reducer SearchAggregator + Args []interface{} + As string +} + +type FTAggregateGroupBy struct { + Fields []interface{} + Reduce []FTAggregateReducer +} + +type FTAggregateSortBy struct { + FieldName string + Asc bool + Desc bool +} + +type FTAggregateApply struct { + Field string + As string +} + +type FTAggregateLoad struct { + Field string + As string +} + +type FTAggregateWithCursor struct { + Count int + MaxIdle int +} + +type FTAggregateOptions struct { + Verbatim bool + LoadAll bool + Load []FTAggregateLoad + Timeout int + GroupBy []FTAggregateGroupBy + SortBy []FTAggregateSortBy + SortByMax int + Apply []FTAggregateApply + LimitOffset int + Limit int + Filter string + WithCursor bool + WithCursorOptions *FTAggregateWithCursor + Params map[string]interface{} + DialectVersion int +} + +type FTSearchFilter struct { + FieldName interface{} + Min interface{} + Max interface{} +} + +type FTSearchGeoFilter struct { + FieldName string + Longitude float64 + Latitude float64 + Radius float64 + Unit string +} + +type FTSearchReturn struct { + FieldName string + As string +} + +type FTSearchSortBy struct { + FieldName string + Asc bool + Desc bool +} + +type FTSearchOptions struct { + NoContent bool + Verbatim bool + NoStopWrods bool + WithScores bool + WithPayloads bool + WithSortKeys bool + Filters []FTSearchFilter + GeoFilter []FTSearchGeoFilter + InKeys []interface{} + InFields []interface{} + Return []FTSearchReturn + Slop int + Timeout int + InOrder bool + Language string + Expander string + Scorer string + ExplainScore bool + Payload string + SortBy []FTSearchSortBy + SortByWithCount bool + LimitOffset int + Limit int + Params map[string]interface{} + DialectVersion int +} + +type FTSynDumpResult struct { + Term string + Synonyms []string +} + +type FTSynDumpCmd struct { + baseCmd + val []FTSynDumpResult +} + +type FTAggregateResult struct { + Total int + Rows []AggregateRow +} + +type AggregateRow struct { + Fields map[string]interface{} +} + +type AggregateCmd struct { + baseCmd + val *FTAggregateResult +} + +type FTInfoResult struct { + IndexErrors IndexErrors + Attributes []FTAttribute + BytesPerRecordAvg string + Cleaning int + CursorStats CursorStats + DialectStats map[string]int + DocTableSizeMB float64 + FieldStatistics []FieldStatistic + GCStats GCStats + GeoshapesSzMB float64 + HashIndexingFailures int + IndexDefinition IndexDefinition + IndexName string + IndexOptions []string + Indexing int + InvertedSzMB float64 + KeyTableSizeMB float64 + MaxDocID int + NumDocs int + NumRecords int + NumTerms int + NumberOfUses int + OffsetBitsPerRecordAvg string + OffsetVectorsSzMB float64 + OffsetsPerTermAvg string + PercentIndexed float64 + RecordsPerDocAvg string + SortableValuesSizeMB float64 + TagOverheadSzMB float64 + TextOverheadSzMB float64 + TotalIndexMemorySzMB float64 + TotalIndexingTime int + TotalInvertedIndexBlocks int + VectorIndexSzMB float64 +} + +type IndexErrors struct { + IndexingFailures int + LastIndexingError string + LastIndexingErrorKey string +} + +type FTAttribute struct { + Identifier string + Attribute string + Type string + Weight float64 + Sortable bool + NoStem bool + NoIndex bool + UNF bool + PhoneticMatcher string + CaseSensitive bool + WithSuffixtrie bool +} + +type CursorStats struct { + GlobalIdle int + GlobalTotal int + IndexCapacity int + IndexTotal int +} + +type FieldStatistic struct { + Identifier string + Attribute string + IndexErrors IndexErrors +} + +type GCStats struct { + BytesCollected int + TotalMsRun int 
+ TotalCycles int + AverageCycleTimeMs string + LastRunTimeMs int + GCNumericTreesMissed int + GCBlocksDenied int +} + +type IndexDefinition struct { + KeyType string + Prefixes []string + DefaultScore float64 +} + +type FTSpellCheckOptions struct { + Distance int + Terms *FTSpellCheckTerms + Dialect int +} + +type FTSpellCheckTerms struct { + Inclusion string // Either "INCLUDE" or "EXCLUDE" + Dictionary string + Terms []interface{} +} + +type SpellCheckResult struct { + Term string + Suggestions []SpellCheckSuggestion +} + +type SpellCheckSuggestion struct { + Score float64 + Suggestion string +} + +type FTSearchResult struct { + Total int + Docs []Document +} + +type Document struct { + ID string + Score *float64 + Payload *string + SortKey *string + Fields map[string]string +} + +type AggregateQuery []interface{} + +// FT_List - Lists all the existing indexes in the database. +// For more information, please refer to the Redis documentation: +// [FT._LIST]: (https://redis.io/commands/ft._list/) +func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT._LIST") + _ = c(ctx, cmd) + return cmd +} + +// FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func FTAggregateQuery(query string, options *FTAggregateOptions) AggregateQuery { + queryArgs := []interface{}{query} + if options != nil { + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + if options.LoadAll && options.Load != nil { + panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") + } + if options.LoadAll { + queryArgs = append(queryArgs, "LOAD", "*") + } + if options.Load != nil { + queryArgs = append(queryArgs, "LOAD", len(options.Load)) + for _, load := range options.Load { + queryArgs = append(queryArgs, load.Field) + if load.As != "" { + queryArgs = append(queryArgs, "AS", load.As) + } + } + } + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields)) + queryArgs = append(queryArgs, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + queryArgs = append(queryArgs, "REDUCE") + queryArgs = append(queryArgs, reducer.Reducer.String()) + if reducer.Args != nil { + queryArgs = append(queryArgs, len(reducer.Args)) + queryArgs = append(queryArgs, reducer.Args...) 
+ } else { + queryArgs = append(queryArgs, 0) + } + if reducer.As != "" { + queryArgs = append(queryArgs, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.AGGREGATE: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + queryArgs = append(queryArgs, len(sortByOptions)) + queryArgs = append(queryArgs, sortByOptions...) + } + if options.SortByMax > 0 { + queryArgs = append(queryArgs, "MAX", options.SortByMax) + } + for _, apply := range options.Apply { + queryArgs = append(queryArgs, "APPLY", apply.Field) + if apply.As != "" { + queryArgs = append(queryArgs, "AS", apply.As) + } + } + if options.LimitOffset > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset) + } + if options.Limit > 0 { + queryArgs = append(queryArgs, options.Limit) + } + if options.Filter != "" { + queryArgs = append(queryArgs, "FILTER", options.Filter) + } + if options.WithCursor { + queryArgs = append(queryArgs, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } + } + return queryArgs +} + +func ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) { + if len(data) == 0 { + return nil, fmt.Errorf("no data returned") + } + + total, ok := data[0].(int64) + if !ok { + return nil, fmt.Errorf("invalid total format") + } + + rows := make([]AggregateRow, 0, len(data)-1) + for _, row := range data[1:] { + fields, ok := row.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid row format") + } + + rowMap := make(map[string]interface{}) + for i := 0; i < len(fields); i += 2 { + key, ok := fields[i].(string) + if !ok { + return nil, fmt.Errorf("invalid field key format") + } + value := fields[i+1] + rowMap[key] = value + } + rows = append(rows, AggregateRow{Fields: rowMap}) + } + + result := &FTAggregateResult{ + Total: int(total), + Rows: rows, + } + return result, nil +} + +func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd { + return &AggregateCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) { + cmd.val = val +} + +func (cmd *AggregateCmd) Val() *FTAggregateResult { + return cmd.val +} + +func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) { + return cmd.val, cmd.err +} + +func (cmd *AggregateCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + cmd.err = err + return nil + } + cmd.val, err = ProcessAggregateResult(data) + if err != nil { + cmd.err = err + } + return nil +} + +// FTAggregateWithArgs - Performs a search query on an index and applies a series 
of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion. +// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + if options != nil { + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.LoadAll && options.Load != nil { + panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") + } + if options.LoadAll { + args = append(args, "LOAD", "*") + } + if options.Load != nil { + args = append(args, "LOAD", len(options.Load)) + for _, load := range options.Load { + args = append(args, load.Field) + if load.As != "" { + args = append(args, "AS", load.As) + } + } + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + args = append(args, "GROUPBY", len(groupBy.Fields)) + args = append(args, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + args = append(args, "REDUCE") + args = append(args, reducer.Reducer.String()) + if reducer.Args != nil { + args = append(args, len(reducer.Args)) + args = append(args, reducer.Args...) + } else { + args = append(args, 0) + } + if reducer.As != "" { + args = append(args, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + args = append(args, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.AGGREGATE: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + args = append(args, len(sortByOptions)) + args = append(args, sortByOptions...) + } + if options.SortByMax > 0 { + args = append(args, "MAX", options.SortByMax) + } + for _, apply := range options.Apply { + args = append(args, "APPLY", apply.Field) + if apply.As != "" { + args = append(args, "AS", apply.As) + } + } + if options.LimitOffset > 0 { + args = append(args, "LIMIT", options.LimitOffset) + } + if options.Limit > 0 { + args = append(args, options.Limit) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.WithCursor { + args = append(args, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + args = append(args, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } + } + + cmd := NewAggregateCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasAdd - Adds an alias to an index. 
+// The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias. +// For more information, please refer to the Redis documentation: +// [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/) +func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd { + args := []interface{}{"FT.ALIASADD", alias, index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasDel - Removes an alias from an index. +// The 'alias' parameter specifies the alias to be removed. +// For more information, please refer to the Redis documentation: +// [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/) +func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasUpdate - Updates an alias to an index. +// The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias. +// If the alias already exists for a different index, it updates the alias to point to the specified index instead. +// For more information, please refer to the Redis documentation: +// [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/) +func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index) + _ = c(ctx, cmd) + return cmd +} + +// FTAlter - Alters the definition of an existing index. +// The 'index' parameter specifies the index to alter, and the 'skipInitalScan' parameter specifies whether to skip the initial scan. +// The 'definition' parameter specifies the new definition for the index. +// For more information, please refer to the Redis documentation: +// [FT.ALTER]: (https://redis.io/commands/ft.alter/) +func (c cmdable) FTAlter(ctx context.Context, index string, skipInitalScan bool, definition []interface{}) *StatusCmd { + args := []interface{}{"FT.ALTER", index} + if skipInitalScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, "SCHEMA", "ADD") + args = append(args, definition...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTConfigGet - Retrieves the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to retrieve. +// For more information, please refer to the Redis documentation: +// [FT.CONFIG GET]: (https://redis.io/commands/ft.config-get/) +func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd { + cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option) + _ = c(ctx, cmd) + return cmd +} + +// FTConfigSet - Sets the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value. +// For more information, please refer to the Redis documentation: +// [FT.CONFIG SET]: (https://redis.io/commands/ft.config-set/) +func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value) + _ = c(ctx, cmd) + return cmd +} + +// FTCreate - Creates a new index with the given options and schema. +// The 'index' parameter specifies the name of the index to create. 
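A short sketch of the FT.CONFIG wrappers added above. It reuses the `rdb` client and `ctx` from the earlier sketch; `TIMEOUT` is just one example configuration parameter.

```go
func showConfig(ctx context.Context, rdb *redis.Client) error {
	// FT.CONFIG SET takes the parameter name and its new value.
	if err := rdb.FTConfigSet(ctx, "TIMEOUT", "1000").Err(); err != nil {
		return err
	}

	// FT.CONFIG GET returns a map keyed by parameter name.
	cfg, err := rdb.FTConfigGet(ctx, "TIMEOUT").Result()
	if err != nil {
		return err
	}
	fmt.Println(cfg["TIMEOUT"])
	return nil
}
```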
+// The 'options' parameter specifies various options for the index, such as: +// whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc. +// The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc. +// For more information, please refer to the Redis documentation: +// [FT.CREATE]: (https://redis.io/commands/ft.create/) +func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd { + args := []interface{}{"FT.CREATE", index} + if options != nil { + if options.OnHash && !options.OnJSON { + args = append(args, "ON", "HASH") + } + if options.OnJSON && !options.OnHash { + args = append(args, "ON", "JSON") + } + if options.OnHash && options.OnJSON { + panic("FT.CREATE: ON HASH and ON JSON are mutually exclusive") + } + if options.Prefix != nil { + args = append(args, "PREFIX", len(options.Prefix)) + args = append(args, options.Prefix...) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.DefaultLanguage != "" { + args = append(args, "LANGUAGE", options.DefaultLanguage) + } + if options.LanguageField != "" { + args = append(args, "LANGUAGE_FIELD", options.LanguageField) + } + if options.Score > 0 { + args = append(args, "SCORE", options.Score) + } + if options.ScoreField != "" { + args = append(args, "SCORE_FIELD", options.ScoreField) + } + if options.PayloadField != "" { + args = append(args, "PAYLOAD_FIELD", options.PayloadField) + } + if options.MaxTextFields > 0 { + args = append(args, "MAXTEXTFIELDS", options.MaxTextFields) + } + if options.NoOffsets { + args = append(args, "NOOFFSETS") + } + if options.Temporary > 0 { + args = append(args, "TEMPORARY", options.Temporary) + } + if options.NoHL { + args = append(args, "NOHL") + } + if options.NoFields { + args = append(args, "NOFIELDS") + } + if options.NoFreqs { + args = append(args, "NOFREQS") + } + if options.StopWords != nil { + args = append(args, "STOPWORDS", len(options.StopWords)) + args = append(args, options.StopWords...) 
+ } + if options.SkipInitalScan { + args = append(args, "SKIPINITIALSCAN") + } + } + if schema == nil { + panic("FT.CREATE: SCHEMA is required") + } + args = append(args, "SCHEMA") + for _, schema := range schema { + if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid { + panic("FT.CREATE: SCHEMA FieldName and FieldType are required") + } + args = append(args, schema.FieldName) + if schema.As != "" { + args = append(args, "AS", schema.As) + } + args = append(args, schema.FieldType.String()) + if schema.VectorArgs != nil { + if schema.FieldType != SearchFieldTypeVector { + panic("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs") + } + if schema.VectorArgs.FlatOptions != nil && schema.VectorArgs.HNSWOptions != nil { + panic("FT.CREATE: SCHEMA VectorArgs FlatOptions and HNSWOptions are mutually exclusive") + } + if schema.VectorArgs.FlatOptions != nil { + args = append(args, "FLAT") + if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" { + panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT") + } + flatArgs := []interface{}{ + "TYPE", schema.VectorArgs.FlatOptions.Type, + "DIM", schema.VectorArgs.FlatOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric, + } + if schema.VectorArgs.FlatOptions.InitialCapacity > 0 { + flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity) + } + if schema.VectorArgs.FlatOptions.BlockSize > 0 { + flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize) + } + args = append(args, len(flatArgs)) + args = append(args, flatArgs...) + } + if schema.VectorArgs.HNSWOptions != nil { + args = append(args, "HNSW") + if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" { + panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW") + } + hnswArgs := []interface{}{ + "TYPE", schema.VectorArgs.HNSWOptions.Type, + "DIM", schema.VectorArgs.HNSWOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric, + } + if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 { + hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity) + } + if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.EFRunTime > 0 { + hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime) + } + if schema.VectorArgs.HNSWOptions.Epsilon > 0 { + hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon) + } + args = append(args, len(hnswArgs)) + args = append(args, hnswArgs...) 
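To illustrate the VECTOR schema path being built here, a hedged sketch of creating an HNSW vector index. The `FTVectorArgs` and `FTHNSWOptions` struct names are assumed from the fields accessed above (the types live elsewhere in this PR), and the index and field names are placeholders.

```go
func createVectorIndex(ctx context.Context, rdb *redis.Client) error {
	// ON HASH index over keys with the vec: prefix, with one HNSW vector field.
	return rdb.FTCreate(ctx, "idx:vec",
		&redis.FTCreateOptions{OnHash: true, Prefix: []interface{}{"vec:"}},
		&redis.FieldSchema{
			FieldName: "embedding",
			FieldType: redis.SearchFieldTypeVector,
			// FTVectorArgs / FTHNSWOptions: struct names assumed, fields as used above.
			VectorArgs: &redis.FTVectorArgs{
				HNSWOptions: &redis.FTHNSWOptions{
					Type:           "FLOAT32",
					Dim:            128,
					DistanceMetric: "COSINE",
				},
			},
		},
	).Err()
}
```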
+ } + } + if schema.GeoShapeFieldType != "" { + if schema.FieldType != SearchFieldTypeGeoShape { + panic("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType") + } + args = append(args, schema.GeoShapeFieldType) + } + if schema.NoStem { + args = append(args, "NOSTEM") + } + if schema.Sortable { + args = append(args, "SORTABLE") + } + if schema.UNF { + args = append(args, "UNF") + } + if schema.NoIndex { + args = append(args, "NOINDEX") + } + if schema.PhoneticMatcher != "" { + args = append(args, "PHONETIC", schema.PhoneticMatcher) + } + if schema.Weight > 0 { + args = append(args, "WEIGHT", schema.Weight) + } + if schema.Seperator != "" { + args = append(args, "SEPERATOR", schema.Seperator) + } + if schema.CaseSensitive { + args = append(args, "CASESENSITIVE") + } + if schema.WithSuffixtrie { + args = append(args, "WITHSUFFIXTRIE") + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorDel - Deletes a cursor from an existing index. +// The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete. +// For more information, please refer to the Redis documentation: +// [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/) +func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorRead - Reads the next results from an existing cursor. +// The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read. +// For more information, please refer to the Redis documentation: +// [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/) +func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd { + args := []interface{}{"FT.CURSOR", "READ", index, cursorId} + if count > 0 { + args = append(args, "COUNT", count) + } + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictAdd - Adds terms to a dictionary. +// The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add. +// For more information, please refer to the Redis documentation: +// [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/) +func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTADD", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDel - Deletes terms from a dictionary. +// The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete. +// For more information, please refer to the Redis documentation: +// [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/) +func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTDEL", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDump - Returns all terms in the specified dictionary. +// The 'dict' parameter specifies the dictionary from which to return the terms. 
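A quick sketch of the dictionary helpers wrapped above (FT.DICTADD / FT.DICTDUMP); the dictionary name and terms are placeholders.

```go
func manageDict(ctx context.Context, rdb *redis.Client) error {
	// FTDictAdd returns the number of terms that were actually added.
	added, err := rdb.FTDictAdd(ctx, "custom_dict", "foo", "bar", "baz").Result()
	if err != nil {
		return err
	}
	fmt.Println("added:", added)

	// FTDictDump returns every term currently stored in the dictionary.
	terms, err := rdb.FTDictDump(ctx, "custom_dict").Result()
	if err != nil {
		return err
	}
	fmt.Println(terms) // []string
	return nil
}
```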
+// For more information, please refer to the Redis documentation: +// [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/) +func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndex - Deletes an index. +// The 'index' parameter specifies the index to delete. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndexWithArgs - Deletes an index with options. +// The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + if options != nil { + if options.DeleteDocs { + args = append(args, "DD") + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplain - Returns the execution plan for a complex query. +// The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd { + cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainWithArgs - Returns the execution plan for a complex query with options. +// The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd { + args := []interface{}{"FT.EXPLAIN", index, query} + if options.Dialect != "" { + args = append(args, "DIALECT", options.Dialect) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainCli - Returns the execution plan for a complex query. 
[Not Implemented] +// For more information, see https://redis.io/commands/ft.explaincli/ +func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error { + panic("not implemented") +} + +func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) { + var ftInfo FTInfoResult + // Manually parse each field from the map + if indexErrors, ok := data["Index Errors"].([]interface{}); ok { + ftInfo.IndexErrors = IndexErrors{ + IndexingFailures: internal.ToInteger(indexErrors[1]), + LastIndexingError: internal.ToString(indexErrors[3]), + LastIndexingErrorKey: internal.ToString(indexErrors[5]), + } + } + + if attributes, ok := data["attributes"].([]interface{}); ok { + for _, attr := range attributes { + if attrMap, ok := attr.([]interface{}); ok { + att := FTAttribute{} + for i := 0; i < len(attrMap); i++ { + if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" { + att.Attribute = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" { + att.Identifier = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "type" { + att.Type = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "weight" { + att.Weight = internal.ToFloat(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" { + att.NoStem = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" { + att.Sortable = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" { + att.NoIndex = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "unf" { + att.UNF = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" { + att.PhoneticMatcher = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" { + att.CaseSensitive = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" { + att.WithSuffixtrie = true + continue + } + + } + ftInfo.Attributes = append(ftInfo.Attributes, att) + } + } + } + + ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"]) + ftInfo.Cleaning = internal.ToInteger(data["cleaning"]) + + if cursorStats, ok := data["cursor_stats"].([]interface{}); ok { + ftInfo.CursorStats = CursorStats{ + GlobalIdle: internal.ToInteger(cursorStats[1]), + GlobalTotal: internal.ToInteger(cursorStats[3]), + IndexCapacity: internal.ToInteger(cursorStats[5]), + IndexTotal: internal.ToInteger(cursorStats[7]), + } + } + + if dialectStats, ok := data["dialect_stats"].([]interface{}); ok { + ftInfo.DialectStats = make(map[string]int) + for i := 0; i < len(dialectStats); i += 2 { + ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1]) + } + } + + ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"]) + + if fieldStats, ok := data["field statistics"].([]interface{}); ok { + for _, stat := range fieldStats { + if statMap, ok := stat.([]interface{}); ok { + ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{ + Identifier: internal.ToString(statMap[1]), + Attribute: internal.ToString(statMap[3]), + IndexErrors: IndexErrors{ + IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]), + LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]), + LastIndexingErrorKey: 
internal.ToString(statMap[5].([]interface{})[5]), + }, + }) + } + } + } + + if gcStats, ok := data["gc_stats"].([]interface{}); ok { + ftInfo.GCStats = GCStats{} + for i := 0; i < len(gcStats); i += 2 { + if internal.ToLower(internal.ToString(gcStats[i])) == "bytes_collected" { + ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" { + ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" { + ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" { + ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" { + ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" { + ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" { + ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1]) + continue + } + } + } + + ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"]) + ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"]) + + if indexDef, ok := data["index_definition"].([]interface{}); ok { + ftInfo.IndexDefinition = IndexDefinition{ + KeyType: internal.ToString(indexDef[1]), + Prefixes: internal.ToStringSlice(indexDef[3]), + DefaultScore: internal.ToFloat(indexDef[5]), + } + } + + ftInfo.IndexName = internal.ToString(data["index_name"]) + ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{})) + ftInfo.Indexing = internal.ToInteger(data["indexing"]) + ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"]) + ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"]) + ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"]) + ftInfo.NumDocs = internal.ToInteger(data["num_docs"]) + ftInfo.NumRecords = internal.ToInteger(data["num_records"]) + ftInfo.NumTerms = internal.ToInteger(data["num_terms"]) + ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"]) + ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"]) + ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"]) + ftInfo.OffsetsPerTermAvg = internal.ToString(data["offsets_per_term_avg"]) + ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"]) + ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"]) + ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"]) + ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"]) + ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"]) + ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"]) + ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"]) + ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"]) + ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"]) + + return ftInfo, nil +} + +type FTInfoCmd struct { + baseCmd + val FTInfoResult +} + +func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd { + return &FTInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } 
+} + +func (cmd *FTInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTInfoCmd) SetVal(val FTInfoResult) { + cmd.val = val +} + +func (cmd *FTInfoCmd) Result() (FTInfoResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTInfoCmd) Val() FTInfoResult { + return cmd.val +} + +func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + data := make(map[string]interface{}, n) + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + data[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + data[k] = err + continue + } + return err + } + data[k] = v + } + cmd.val, err = parseFTInfo(data) + if err != nil { + cmd.err = err + } + + return nil +} + +// FTInfo - Retrieves information about an index. +// The 'index' parameter specifies the index to retrieve information about. +// For more information, please refer to the Redis documentation: +// [FT.INFO]: (https://redis.io/commands/ft.info/) +func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd { + cmd := newFTInfoCmd(ctx, "FT.INFO", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheck - Checks a query string for spelling errors. +// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + cmd := newFTSpellCheckCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options. +// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + if options != nil { + if options.Distance > 0 { + args = append(args, "DISTANCE", options.Distance) + } + if options.Terms != nil { + args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary) + args = append(args, options.Terms.Terms...) + } + if options.Dialect > 0 { + args = append(args, "DIALECT", options.Dialect) + } + } + cmd := newFTSpellCheckCmd(ctx, args...) 
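A sketch of how the FT.INFO and FT.SPELLCHECK wrappers above might be consumed. The index name and query text are placeholders, and only scalar spell-check options are used to stay within what this hunk shows.

```go
func inspectIndex(ctx context.Context, rdb *redis.Client) error {
	info, err := rdb.FTInfo(ctx, "idx:books").Result()
	if err != nil {
		return err
	}
	fmt.Println("docs:", info.NumDocs, "still indexing:", info.Indexing)

	// Suggest corrections for misspelled query terms, allowing a Levenshtein distance of 2.
	res, err := rdb.FTSpellCheckWithArgs(ctx, "idx:books", "redsi",
		&redis.FTSpellCheckOptions{Distance: 2}).Result()
	if err != nil {
		return err
	}
	for _, term := range res {
		for _, s := range term.Suggestions {
			fmt.Printf("%s -> %s (%.2f)\n", term.Term, s.Suggestion, s.Score)
		}
	}
	return nil
}
```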
+ _ = c(ctx, cmd) + return cmd +} + +type FTSpellCheckCmd struct { + baseCmd + val []SpellCheckResult +} + +func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd { + return &FTSpellCheckCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTSpellCheckCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) { + cmd.val = val +} + +func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult { + return cmd.val +} + +func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + cmd.err = err + return nil + } + cmd.val, err = parseFTSpellCheck(data) + if err != nil { + cmd.err = err + } + return nil +} + +func parseFTSpellCheck(data []interface{}) ([]SpellCheckResult, error) { + results := make([]SpellCheckResult, 0, len(data)) + + for _, termData := range data { + termInfo, ok := termData.([]interface{}) + if !ok || len(termInfo) != 3 { + return nil, fmt.Errorf("invalid term format") + } + + term, ok := termInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid term format") + } + + suggestionsData, ok := termInfo[2].([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid suggestions format") + } + + suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData)) + for _, suggestionData := range suggestionsData { + suggestionInfo, ok := suggestionData.([]interface{}) + if !ok || len(suggestionInfo) != 2 { + return nil, fmt.Errorf("invalid suggestion format") + } + + scoreStr, ok := suggestionInfo[0].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion score format") + } + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return nil, fmt.Errorf("invalid suggestion score value") + } + + suggestion, ok := suggestionInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion format") + } + + suggestions = append(suggestions, SpellCheckSuggestion{ + Score: score, + Suggestion: suggestion, + }) + } + + results = append(results, SpellCheckResult{ + Term: term, + Suggestions: suggestions, + }) + } + + return results, nil +} + +func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) { + if len(data) < 1 { + return FTSearchResult{}, fmt.Errorf("unexpected search result format") + } + + total, ok := data[0].(int64) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid total results format") + } + + var results []Document + for i := 1; i < len(data); { + docID, ok := data[i].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid document ID format") + } + + doc := Document{ + ID: docID, + Fields: make(map[string]string), + } + i++ + + if noContent { + results = append(results, doc) + continue + } + + if withScores && i < len(data) { + if scoreStr, ok := data[i].(string); ok { + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return FTSearchResult{}, fmt.Errorf("invalid score format") + } + doc.Score = &score + i++ + } + } + + if withPayloads && i < len(data) { + if payload, ok := data[i].(string); ok { + doc.Payload = &payload + i++ + } + } + + if withSortKeys && i < len(data) { + if sortKey, ok := data[i].(string); ok { + doc.SortKey = &sortKey + i++ + } + } + + if i < len(data) { + fields, ok := data[i].([]interface{}) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid document 
fields format") + } + + for j := 0; j < len(fields); j += 2 { + key, ok := fields[j].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field key format") + } + value, ok := fields[j+1].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field value format") + } + doc.Fields[key] = value + } + i++ + } + + results = append(results, doc) + } + return FTSearchResult{ + Total: int(total), + Docs: results, + }, nil +} + +type FTSearchCmd struct { + baseCmd + val FTSearchResult + options *FTSearchOptions +} + +func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd { + return &FTSearchCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + options: options, + } +} + +func (cmd *FTSearchCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSearchCmd) SetVal(val FTSearchResult) { + cmd.val = val +} + +func (cmd *FTSearchCmd) Result() (FTSearchResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSearchCmd) Val() FTSearchResult { + return cmd.val +} + +func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + cmd.err = err + return nil + } + cmd.val, err = parseFTSearch(data, cmd.options.NoContent, cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys) + if err != nil { + cmd.err = err + } + return nil +} + +// FTSearch - Executes a search query on an index. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation: +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...) + _ = c(ctx, cmd) + return cmd +} + +type SearchQuery []interface{} + +func FTSearchQuery(query string, options *FTSearchOptions) SearchQuery { + queryArgs := []interface{}{query} + if options != nil { + if options.NoContent { + queryArgs = append(queryArgs, "NOCONTENT") + } + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + if options.NoStopWrods { + queryArgs = append(queryArgs, "NOSTOPWORDS") + } + if options.WithScores { + queryArgs = append(queryArgs, "WITHSCORES") + } + if options.WithPayloads { + queryArgs = append(queryArgs, "WITHPAYLOADS") + } + if options.WithSortKeys { + queryArgs = append(queryArgs, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + queryArgs = append(queryArgs, "INKEYS", len(options.InKeys)) + queryArgs = append(queryArgs, options.InKeys...) + } + if options.InFields != nil { + queryArgs = append(queryArgs, "INFIELDS", len(options.InFields)) + queryArgs = append(queryArgs, options.InFields...) 
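A minimal sketch of running a plain FT.SEARCH with the wrapper above and walking the parsed result; the index, field, and query are placeholders.

```go
func basicSearch(ctx context.Context, rdb *redis.Client) error {
	res, err := rdb.FTSearch(ctx, "idx:books", "@title:redis").Result()
	if err != nil {
		return err
	}
	fmt.Println("total hits:", res.Total)
	for _, doc := range res.Docs {
		// Fields holds the returned hash fields as plain strings.
		fmt.Println(doc.ID, doc.Fields["title"])
	}
	return nil
}
```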
+ } + if options.Return != nil { + queryArgs = append(queryArgs, "RETURN") + queryArgsReturn := []interface{}{} + for _, ret := range options.Return { + queryArgsReturn = append(queryArgsReturn, ret.FieldName) + if ret.As != "" { + queryArgsReturn = append(queryArgsReturn, "AS", ret.As) + } + } + queryArgs = append(queryArgs, len(queryArgsReturn)) + queryArgs = append(queryArgs, queryArgsReturn...) + } + if options.Slop > 0 { + queryArgs = append(queryArgs, "SLOP", options.Slop) + } + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + if options.InOrder { + queryArgs = append(queryArgs, "INORDER") + } + if options.Language != "" { + queryArgs = append(queryArgs, "LANGUAGE", options.Language) + } + if options.Expander != "" { + queryArgs = append(queryArgs, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + if options.ExplainScore { + queryArgs = append(queryArgs, "EXPLAINSCORE") + } + if options.Payload != "" { + queryArgs = append(queryArgs, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + for _, sortBy := range options.SortBy { + queryArgs = append(queryArgs, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.SEARCH: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + queryArgs = append(queryArgs, "ASC") + } + if sortBy.Desc { + queryArgs = append(queryArgs, "DESC") + } + } + if options.SortByWithCount { + queryArgs = append(queryArgs, "WITHCOUT") + } + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } + } + return queryArgs +} + +// FTSearchWithArgs - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation: +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + if options != nil { + if options.NoContent { + args = append(args, "NOCONTENT") + } + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.NoStopWrods { + args = append(args, "NOSTOPWORDS") + } + if options.WithScores { + args = append(args, "WITHSCORES") + } + if options.WithPayloads { + args = append(args, "WITHPAYLOADS") + } + if options.WithSortKeys { + args = append(args, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + args = append(args, "INKEYS", len(options.InKeys)) + args = append(args, options.InKeys...) 
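And a sketch with search options, mirroring the option handling in FTSearchWithArgs. Only option fields visible in this hunk (and in the tests further down) are used; the index, field names, and range are placeholders.

```go
func sortedSearch(ctx context.Context, rdb *redis.Client) error {
	opts := &redis.FTSearchOptions{
		WithScores:  true,
		SortBy:      []redis.FTSearchSortBy{{FieldName: "price", Asc: true}},
		LimitOffset: 0,
		Limit:       10,
	}
	res, err := rdb.FTSearchWithArgs(ctx, "idx:books", "@price:[10 100]", opts).Result()
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		// Score is only populated because WithScores was requested.
		fmt.Println(doc.ID, *doc.Score)
	}
	return nil
}
```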
+ } + if options.InFields != nil { + args = append(args, "INFIELDS", len(options.InFields)) + args = append(args, options.InFields...) + } + if options.Return != nil { + args = append(args, "RETURN") + argsReturn := []interface{}{} + for _, ret := range options.Return { + argsReturn = append(argsReturn, ret.FieldName) + if ret.As != "" { + argsReturn = append(argsReturn, "AS", ret.As) + } + } + args = append(args, len(argsReturn)) + args = append(args, argsReturn...) + } + if options.Slop > 0 { + args = append(args, "SLOP", options.Slop) + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + if options.InOrder { + args = append(args, "INORDER") + } + if options.Language != "" { + args = append(args, "LANGUAGE", options.Language) + } + if options.Expander != "" { + args = append(args, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.ExplainScore { + args = append(args, "EXPLAINSCORE") + } + if options.Payload != "" { + args = append(args, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + args = append(args, "SORTBY") + for _, sortBy := range options.SortBy { + args = append(args, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.SEARCH: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + args = append(args, "ASC") + } + if sortBy.Desc { + args = append(args, "DESC") + } + } + if options.SortByWithCount { + args = append(args, "WITHCOUT") + } + } + if options.LimitOffset >= 0 && options.Limit > 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } + } + cmd := newFTSearchCmd(ctx, options, args...) + _ = c(ctx, cmd) + return cmd +} + +func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd { + return &FTSynDumpCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTSynDumpCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) { + cmd.val = val +} + +func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult { + return cmd.val +} + +func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error { + termSynonymPairs, err := rd.ReadSlice() + if err != nil { + return err + } + + var results []FTSynDumpResult + for i := 0; i < len(termSynonymPairs); i += 2 { + term, ok := termSynonymPairs[i].(string) + if !ok { + return fmt.Errorf("invalid term format") + } + + synonyms, ok := termSynonymPairs[i+1].([]interface{}) + if !ok { + return fmt.Errorf("invalid synonyms format") + } + + synonymList := make([]string, len(synonyms)) + for j, syn := range synonyms { + synonym, ok := syn.(string) + if !ok { + return fmt.Errorf("invalid synonym format") + } + synonymList[j] = synonym + } + + results = append(results, FTSynDumpResult{ + Term: term, + Synonyms: synonymList, + }) + } + + cmd.val = results + return nil +} + +// FTSynDump - Dumps the contents of a synonym group. +// The 'index' parameter specifies the index to dump. 
+// For more information, please refer to the Redis documentation: +// [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/) +func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd { + cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdate - Creates or updates a synonym group with additional terms. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTTagVals - Returns all distinct values indexed in a tag field. +// The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from. 
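A brief sketch of the synonym helpers above; the group id and terms are placeholders.

```go
func manageSynonyms(ctx context.Context, rdb *redis.Client) error {
	// Create or extend synonym group "1" with two terms.
	if err := rdb.FTSynUpdate(ctx, "idx:books", "1",
		[]interface{}{"car", "automobile"}).Err(); err != nil {
		return err
	}

	// Dump every synonym group defined on the index.
	dump, err := rdb.FTSynDump(ctx, "idx:books").Result()
	if err != nil {
		return err
	}
	for _, entry := range dump {
		fmt.Println(entry.Term, entry.Synonyms)
	}
	return nil
}
```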
+// For more information, please refer to the Redis documentation: +// [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/) +func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field) + _ = c(ctx, cmd) + return cmd +} + +// type FTProfileResult struct { +// Results []interface{} +// Profile ProfileDetails +// } + +// type ProfileDetails struct { +// TotalProfileTime string +// ParsingTime string +// PipelineCreationTime string +// Warning string +// IteratorsProfile []IteratorProfile +// ResultProcessorsProfile []ResultProcessorProfile +// } + +// type IteratorProfile struct { +// Type string +// QueryType string +// Time interface{} +// Counter int +// Term string +// Size int +// ChildIterators []IteratorProfile +// } + +// type ResultProcessorProfile struct { +// Type string +// Time interface{} +// Counter int +// } + +// func parseFTProfileResult(data []interface{}) (FTProfileResult, error) { +// var result FTProfileResult +// if len(data) < 2 { +// return result, fmt.Errorf("unexpected data length") +// } + +// // Parse results +// result.Results = data[0].([]interface{}) + +// // Parse profile details +// profileData := data[1].([]interface{}) +// profileDetails := ProfileDetails{} +// for i := 0; i < len(profileData); i += 2 { +// switch profileData[i].(string) { +// case "Total profile time": +// profileDetails.TotalProfileTime = profileData[i+1].(string) +// case "Parsing time": +// profileDetails.ParsingTime = profileData[i+1].(string) +// case "Pipeline creation time": +// profileDetails.PipelineCreationTime = profileData[i+1].(string) +// case "Warning": +// profileDetails.Warning = profileData[i+1].(string) +// case "Iterators profile": +// profileDetails.IteratorsProfile = parseIteratorsProfile(profileData[i+1].([]interface{})) +// case "Result processors profile": +// profileDetails.ResultProcessorsProfile = parseResultProcessorsProfile(profileData[i+1].([]interface{})) +// } +// } + +// result.Profile = profileDetails +// return result, nil +// } + +// func parseIteratorsProfile(data []interface{}) []IteratorProfile { +// var iterators []IteratorProfile +// for _, item := range data { +// profile := item.([]interface{}) +// iterator := IteratorProfile{} +// for i := 0; i < len(profile); i += 2 { +// switch profile[i].(string) { +// case "Type": +// iterator.Type = profile[i+1].(string) +// case "Query type": +// iterator.QueryType = profile[i+1].(string) +// case "Time": +// iterator.Time = profile[i+1] +// case "Counter": +// iterator.Counter = int(profile[i+1].(int64)) +// case "Term": +// iterator.Term = profile[i+1].(string) +// case "Size": +// iterator.Size = int(profile[i+1].(int64)) +// case "Child iterators": +// iterator.ChildIterators = parseChildIteratorsProfile(profile[i+1].([]interface{})) +// } +// } +// iterators = append(iterators, iterator) +// } +// return iterators +// } + +// func parseChildIteratorsProfile(data []interface{}) []IteratorProfile { +// var iterators []IteratorProfile +// for _, item := range data { +// profile := item.([]interface{}) +// iterator := IteratorProfile{} +// for i := 0; i < len(profile); i += 2 { +// switch profile[i].(string) { +// case "Type": +// iterator.Type = profile[i+1].(string) +// case "Query type": +// iterator.QueryType = profile[i+1].(string) +// case "Time": +// iterator.Time = profile[i+1] +// case "Counter": +// iterator.Counter = int(profile[i+1].(int64)) +// case "Term": +// iterator.Term = profile[i+1].(string) 
+// case "Size": +// iterator.Size = int(profile[i+1].(int64)) +// } +// } +// iterators = append(iterators, iterator) +// } +// return iterators +// } + +// func parseResultProcessorsProfile(data []interface{}) []ResultProcessorProfile { +// var processors []ResultProcessorProfile +// for _, item := range data { +// profile := item.([]interface{}) +// processor := ResultProcessorProfile{} +// for i := 0; i < len(profile); i += 2 { +// switch profile[i].(string) { +// case "Type": +// processor.Type = profile[i+1].(string) +// case "Time": +// processor.Time = profile[i+1] +// case "Counter": +// processor.Counter = int(profile[i+1].(int64)) +// } +// } +// processors = append(processors, processor) +// } +// return processors +// } + +// func NewFTProfileCmd(ctx context.Context, args ...interface{}) *FTProfileCmd { +// return &FTProfileCmd{ +// baseCmd: baseCmd{ +// ctx: ctx, +// args: args, +// }, +// } +// } + +// type FTProfileCmd struct { +// baseCmd +// val FTProfileResult +// } + +// func (cmd *FTProfileCmd) String() string { +// return cmdString(cmd, cmd.val) +// } + +// func (cmd *FTProfileCmd) SetVal(val FTProfileResult) { +// cmd.val = val +// } + +// func (cmd *FTProfileCmd) Result() (FTProfileResult, error) { +// return cmd.val, cmd.err +// } + +// func (cmd *FTProfileCmd) Val() FTProfileResult { +// return cmd.val +// } + +// func (cmd *FTProfileCmd) readReply(rd *proto.Reader) (err error) { +// data, err := rd.ReadSlice() +// if err != nil { +// return err +// } +// cmd.val, err = parseFTProfileResult(data) +// if err != nil { +// cmd.err = err +// } +// return nil +// } + +// // FTProfile - Executes a search query and returns a profile of how the query was processed. +// // The 'index' parameter specifies the index to search, the 'limited' parameter specifies whether to limit the results, +// // and the 'query' parameter specifies the search / aggreagte query. Please notice that you must either pass a SearchQuery or an AggregateQuery. +// // For more information, please refer to the Redis documentation: +// // [FT.PROFILE]: (https://redis.io/commands/ft.profile/) +// func (c cmdable) FTProfile(ctx context.Context, index string, limited bool, query interface{}) *FTProfileCmd { +// queryType := "" +// var argsQuery []interface{} + +// switch v := query.(type) { +// case AggregateQuery: +// queryType = "AGGREGATE" +// argsQuery = v +// case SearchQuery: +// queryType = "SEARCH" +// argsQuery = v +// default: +// panic("FT.PROFILE: query must be either AggregateQuery or SearchQuery") +// } + +// args := []interface{}{"FT.PROFILE", index, queryType} + +// if limited { +// args = append(args, "LIMITED") +// } +// args = append(args, "QUERY") +// args = append(args, argsQuery...) + +// cmd := NewFTProfileCmd(ctx, args...) +// _ = c(ctx, cmd) +// return cmd +// } diff --git a/search_test.go b/search_test.go new file mode 100644 index 00000000..60888ef5 --- /dev/null +++ b/search_test.go @@ -0,0 +1,1136 @@ +package redis_test + +import ( + "context" + "time" + + . "github.com/bsm/ginkgo/v2" + . 
"github.com/bsm/gomega" + "github.com/redis/go-redis/v9" +) + +func WaitForIndexing(c *redis.Client, index string) { + for { + res, err := c.FTInfo(context.Background(), index).Result() + Expect(err).NotTo(HaveOccurred()) + if c.Options().Protocol == 2 { + if res.Indexing == 0 { + return + } + time.Sleep(100 * time.Millisecond) + } + } +} + +var _ = Describe("RediSearch commands", Label("search"), func() { + ctx := context.TODO() + var client *redis.Client + + BeforeEach(func() { + client = redis.NewClient(&redis.Options{Addr: ":6379", Protocol: 2}) + Expect(client.FlushDB(ctx).Err()).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + Expect(client.Close()).NotTo(HaveOccurred()) + }) + + It("should FTCreate and FTSearch WithScores", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "txt") + client.HSet(ctx, "doc1", "txt", "foo baz") + client.HSet(ctx, "doc2", "txt", "foo bar") + res, err := client.FTSearchWithArgs(ctx, "txt", "foo ~bar", &redis.FTSearchOptions{WithScores: true}).Result() + + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(int64(2))) + for _, doc := range res.Docs { + Expect(*doc.Score).To(BeNumerically(">", 0)) + Expect(doc.ID).To(Or(Equal("doc1"), Equal("doc2"))) + } + }) + + It("should FTCreate and FTSearch stopwords", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{StopWords: []interface{}{"foo", "bar", "baz"}}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "txt") + client.HSet(ctx, "doc1", "txt", "foo baz") + client.HSet(ctx, "doc2", "txt", "hello world") + res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{NoContent: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(0))) + res2, err := client.FTSearchWithArgs(ctx, "txt", "foo bar hello world", &redis.FTSearchOptions{NoContent: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res2.Total).To(BeEquivalentTo(int64(1))) + }) + + It("should FTCreate and FTSearch filters", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}, &redis.FieldSchema{FieldName: "num", FieldType: redis.SearchFieldTypeNumeric}, &redis.FieldSchema{FieldName: "loc", FieldType: redis.SearchFieldTypeGeo}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "txt") + client.HSet(ctx, "doc1", "txt", "foo bar", "num", 3.141, "loc", "-0.441,51.458") + client.HSet(ctx, "doc2", "txt", "foo baz", "num", 2, "loc", "-0.1,51.2") + res1, err := client.FTSearchWithArgs(ctx, "txt", "foo", &redis.FTSearchOptions{Filters: []redis.FTSearchFilter{{FieldName: "num", Min: 0, Max: 2}}, NoContent: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(1))) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("doc2")) + res2, err := client.FTSearchWithArgs(ctx, "txt", "foo", &redis.FTSearchOptions{Filters: []redis.FTSearchFilter{{FieldName: "num", Min: 0, Max: "+inf"}}, NoContent: 
true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res2.Total).To(BeEquivalentTo(int64(2))) + Expect(res2.Docs[0].ID).To(BeEquivalentTo("doc1")) + // Test Geo filter + geoFilter1 := redis.FTSearchGeoFilter{FieldName: "loc", Longitude: -0.44, Latitude: 51.45, Radius: 10, Unit: "km"} + geoFilter2 := redis.FTSearchGeoFilter{FieldName: "loc", Longitude: -0.44, Latitude: 51.45, Radius: 100, Unit: "km"} + res3, err := client.FTSearchWithArgs(ctx, "txt", "foo", &redis.FTSearchOptions{GeoFilter: []redis.FTSearchGeoFilter{geoFilter1}, NoContent: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res3.Total).To(BeEquivalentTo(int64(1))) + Expect(res3.Docs[0].ID).To(BeEquivalentTo("doc1")) + res4, err := client.FTSearchWithArgs(ctx, "txt", "foo", &redis.FTSearchOptions{GeoFilter: []redis.FTSearchGeoFilter{geoFilter2}, NoContent: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res4.Total).To(BeEquivalentTo(int64(2))) + docs := []interface{}{res4.Docs[0].ID, res4.Docs[1].ID} + Expect(docs).To(ContainElement("doc1")) + Expect(docs).To(ContainElement("doc2")) + + }) + + It("should FTCreate and FTSearch sortby", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "num", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}, &redis.FieldSchema{FieldName: "num", FieldType: redis.SearchFieldTypeNumeric, Sortable: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "num") + client.HSet(ctx, "doc1", "txt", "foo bar", "num", 1) + client.HSet(ctx, "doc2", "txt", "foo baz", "num", 2) + client.HSet(ctx, "doc3", "txt", "foo qux", "num", 3) + + sortBy1 := redis.FTSearchSortBy{FieldName: "num", Asc: true} + sortBy2 := redis.FTSearchSortBy{FieldName: "num", Desc: true} + res1, err := client.FTSearchWithArgs(ctx, "num", "foo", &redis.FTSearchOptions{NoContent: true, SortBy: []redis.FTSearchSortBy{sortBy1}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(3))) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("doc1")) + Expect(res1.Docs[1].ID).To(BeEquivalentTo("doc2")) + Expect(res1.Docs[2].ID).To(BeEquivalentTo("doc3")) + + res2, err := client.FTSearchWithArgs(ctx, "num", "foo", &redis.FTSearchOptions{NoContent: true, SortBy: []redis.FTSearchSortBy{sortBy2}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res2.Total).To(BeEquivalentTo(int64(3))) + Expect(res2.Docs[2].ID).To(BeEquivalentTo("doc1")) + Expect(res2.Docs[1].ID).To(BeEquivalentTo("doc2")) + Expect(res2.Docs[0].ID).To(BeEquivalentTo("doc3")) + + }) + + It("should FTCreate and FTSearch example", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "title", FieldType: redis.SearchFieldTypeText, Weight: 5}, &redis.FieldSchema{FieldName: "body", FieldType: redis.SearchFieldTypeText}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "txt") + client.HSet(ctx, "doc1", "title", "RediSearch", "body", "Redisearch impements a search engine on top of redis") + res1, err := client.FTSearchWithArgs(ctx, "txt", "search engine", &redis.FTSearchOptions{NoContent: true, Verbatim: true, LimitOffset: 0, Limit: 5}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(1))) + + }) + + It("should FTCreate NoIndex", Label("search", "ftcreate", "ftsearch"), func() { + text1 := 
&redis.FieldSchema{FieldName: "field", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "text", FieldType: redis.SearchFieldTypeText, NoIndex: true, Sortable: true} + num := &redis.FieldSchema{FieldName: "numeric", FieldType: redis.SearchFieldTypeNumeric, NoIndex: true, Sortable: true} + geo := &redis.FieldSchema{FieldName: "geo", FieldType: redis.SearchFieldTypeGeo, NoIndex: true, Sortable: true} + tag := &redis.FieldSchema{FieldName: "tag", FieldType: redis.SearchFieldTypeTag, NoIndex: true, Sortable: true} + val, err := client.FTCreate(ctx, "idx", &redis.FTCreateOptions{}, text1, text2, num, geo, tag).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx") + client.HSet(ctx, "doc1", "field", "aaa", "text", "1", "numeric", 1, "geo", "1,1", "tag", "1") + client.HSet(ctx, "doc2", "field", "aab", "text", "2", "numeric", 2, "geo", "2,2", "tag", "2") + res1, err := client.FTSearch(ctx, "idx", "@text:aa*").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(0))) + res2, err := client.FTSearch(ctx, "idx", "@field:aa*").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res2.Total).To(BeEquivalentTo(int64(2))) + res3, err := client.FTSearchWithArgs(ctx, "idx", "*", &redis.FTSearchOptions{SortBy: []redis.FTSearchSortBy{{FieldName: "text", Desc: true}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res3.Total).To(BeEquivalentTo(int64(2))) + Expect(res3.Docs[0].ID).To(BeEquivalentTo("doc2")) + res4, err := client.FTSearchWithArgs(ctx, "idx", "*", &redis.FTSearchOptions{SortBy: []redis.FTSearchSortBy{{FieldName: "text", Asc: true}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res4.Total).To(BeEquivalentTo(int64(2))) + Expect(res4.Docs[0].ID).To(BeEquivalentTo("doc1")) + res5, err := client.FTSearchWithArgs(ctx, "idx", "*", &redis.FTSearchOptions{SortBy: []redis.FTSearchSortBy{{FieldName: "numeric", Asc: true}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res5.Docs[0].ID).To(BeEquivalentTo("doc1")) + res6, err := client.FTSearchWithArgs(ctx, "idx", "*", &redis.FTSearchOptions{SortBy: []redis.FTSearchSortBy{{FieldName: "geo", Asc: true}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res6.Docs[0].ID).To(BeEquivalentTo("doc1")) + res7, err := client.FTSearchWithArgs(ctx, "idx", "*", &redis.FTSearchOptions{SortBy: []redis.FTSearchSortBy{{FieldName: "tag", Asc: true}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res7.Docs[0].ID).To(BeEquivalentTo("doc1")) + + }) + + It("should FTExplain", Label("search", "ftexplain"), func() { + text1 := &redis.FieldSchema{FieldName: "f1", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "f2", FieldType: redis.SearchFieldTypeText} + text3 := &redis.FieldSchema{FieldName: "f3", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "txt", &redis.FTCreateOptions{}, text1, text2, text3).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "txt") + res1, err := client.FTExplain(ctx, "txt", "@f3:f3_val @f2:f2_val @f1:f1_val").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1).ToNot(BeEmpty()) + + }) + + It("should FTAlias", Label("search", "ftexplain"), func() { + text1 := &redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText} + val1, err := client.FTCreate(ctx, "testAlias", 
&redis.FTCreateOptions{Prefix: []interface{}{"index1:"}}, text1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val1).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "testAlias") + val2, err := client.FTCreate(ctx, "testAlias2", &redis.FTCreateOptions{Prefix: []interface{}{"index2:"}}, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val2).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "testAlias2") + + client.HSet(ctx, "index1:lonestar", "name", "lonestar") + client.HSet(ctx, "index2:yogurt", "name", "yogurt") + + res1, err := client.FTSearch(ctx, "testAlias", "*").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("index1:lonestar")) + + aliasAddRes, err := client.FTAliasAdd(ctx, "testAlias", "mj23").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(aliasAddRes).To(BeEquivalentTo("OK")) + + res1, err = client.FTSearch(ctx, "mj23", "*").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("index1:lonestar")) + + aliasUpdateRes, err := client.FTAliasUpdate(ctx, "testAlias2", "kb24").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(aliasUpdateRes).To(BeEquivalentTo("OK")) + + res3, err := client.FTSearch(ctx, "kb24", "*").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res3.Docs[0].ID).To(BeEquivalentTo("index2:yogurt")) + + aliasDelRes, err := client.FTAliasDel(ctx, "mj23").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(aliasDelRes).To(BeEquivalentTo("OK")) + + }) + + It("should FTCreate and FTSearch textfield, sortable and nostem ", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText, Sortable: true, NoStem: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + resInfo, err := client.FTInfo(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resInfo.Attributes[0].Sortable).To(BeTrue()) + Expect(resInfo.Attributes[0].NoStem).To(BeTrue()) + + }) + + It("should FTAlter", Label("search", "ftcreate", "ftsearch", "ftalter"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + resAlter, err := client.FTAlter(ctx, "idx1", false, []interface{}{"body", redis.SearchFieldTypeText.String()}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resAlter).To(BeEquivalentTo("OK")) + + client.HSet(ctx, "doc1", "title", "MyTitle", "body", "Some content only in the body") + res1, err := client.FTSearch(ctx, "idx1", "only in the body").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(1))) + + }) + + It("should FTSpellCheck", Label("search", "ftcreate", "ftsearch", "ftspellcheck"), func() { + text1 := &redis.FieldSchema{FieldName: "f1", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "f2", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "f1", "some valid content", "f2", "this is sample text") + client.HSet(ctx, "doc2", "f1", "very important", "f2", "lorem 
ipsum") + + resSpellCheck, err := client.FTSpellCheck(ctx, "idx1", "impornant").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSpellCheck[0].Suggestions[0].Suggestion).To(BeEquivalentTo("important")) + + resSpellCheck2, err := client.FTSpellCheck(ctx, "idx1", "contnt").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSpellCheck2[0].Suggestions[0].Suggestion).To(BeEquivalentTo("content")) + + // test spellcheck with Levenshtein distance + resSpellCheck3, err := client.FTSpellCheck(ctx, "idx1", "vlis").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSpellCheck3[0].Term).To(BeEquivalentTo("vlis")) + + resSpellCheck4, err := client.FTSpellCheckWithArgs(ctx, "idx1", "vlis", &redis.FTSpellCheckOptions{Distance: 2}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSpellCheck4[0].Suggestions[0].Suggestion).To(BeEquivalentTo("valid")) + + // test spellcheck include + resDictAdd, err := client.FTDictAdd(ctx, "dict", "lore", "lorem", "lorm").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDictAdd).To(BeEquivalentTo(3)) + terms := &redis.FTSpellCheckTerms{Inclusion: "INCLUDE", Dictionary: "dict"} + resSpellCheck5, err := client.FTSpellCheckWithArgs(ctx, "idx1", "lorm", &redis.FTSpellCheckOptions{Terms: terms}).Result() + Expect(err).NotTo(HaveOccurred()) + lorm := resSpellCheck5[0].Suggestions + Expect(len(lorm)).To(BeEquivalentTo(3)) + Expect(lorm[0].Score).To(BeEquivalentTo(0.5)) + Expect(lorm[1].Score).To(BeEquivalentTo(0)) + Expect(lorm[2].Score).To(BeEquivalentTo(0)) + + terms2 := &redis.FTSpellCheckTerms{Inclusion: "EXCLUDE", Dictionary: "dict"} + resSpellCheck6, err := client.FTSpellCheckWithArgs(ctx, "idx1", "lorm", &redis.FTSpellCheckOptions{Terms: terms2}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSpellCheck6).To(BeEmpty()) + }) + + It("should FTDict operations", Label("search", "ftdictdump", "ftdictdel", "ftdictadd"), func() { + text1 := &redis.FieldSchema{FieldName: "f1", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "f2", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + resDictAdd, err := client.FTDictAdd(ctx, "custom_dict", "item1", "item2", "item3").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDictAdd).To(BeEquivalentTo(3)) + + resDictDel, err := client.FTDictDel(ctx, "custom_dict", "item2").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDictDel).To(BeEquivalentTo(1)) + + resDictDump, err := client.FTDictDump(ctx, "custom_dict").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDictDump).To(BeEquivalentTo([]string{"item1", "item3"})) + + resDictDel2, err := client.FTDictDel(ctx, "custom_dict", "item1", "item3").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDictDel2).To(BeEquivalentTo(2)) + }) + + It("should FTSearch phonetic matcher", Label("search", "ftsearch"), func() { + text1 := &redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "name", "Jon") + client.HSet(ctx, "doc2", "name", "John") + + res1, err := client.FTSearch(ctx, "idx1", "Jon").Result() + Expect(err).NotTo(HaveOccurred()) + 
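// --- Illustrative sketch, not part of the diff: how the FTSpellCheck / FTDict API
// exercised in the tests above might be used from application code. The server
// address, index name, key names and dictionary name are assumptions made for this
// example, as is the github.com/redis/go-redis/v9 import path.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Index a document so the spell checker has terms to draw suggestions from.
	if err := rdb.FTCreate(ctx, "articles", &redis.FTCreateOptions{},
		&redis.FieldSchema{FieldName: "body", FieldType: redis.SearchFieldTypeText}).Err(); err != nil {
		panic(err)
	}
	rdb.HSet(ctx, "article:1", "body", "some valid content")

	// Plain spell check against the indexed terms.
	res, err := rdb.FTSpellCheck(ctx, "articles", "contnt").Result()
	if err != nil {
		panic(err)
	}
	for _, term := range res {
		for _, s := range term.Suggestions {
			fmt.Printf("%s -> %s (score %v)\n", term.Term, s.Suggestion, s.Score)
		}
	}

	// Restrict suggestions to a custom dictionary (INCLUDE) and widen the
	// Levenshtein distance, mirroring FTSpellCheckWithArgs in the tests above.
	rdb.FTDictAdd(ctx, "mydict", "content", "context")
	terms := &redis.FTSpellCheckTerms{Inclusion: "INCLUDE", Dictionary: "mydict"}
	res2, err := rdb.FTSpellCheckWithArgs(ctx, "articles", "contnt",
		&redis.FTSpellCheckOptions{Distance: 2, Terms: terms}).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(res2)
}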
Expect(res1.Total).To(BeEquivalentTo(int64(1))) + Expect(res1.Docs[0].Fields["name"]).To(BeEquivalentTo("Jon")) + + client.FlushDB(ctx) + text2 := &redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText, PhoneticMatcher: "dm:en"} + val2, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val2).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "name", "Jon") + client.HSet(ctx, "doc2", "name", "John") + + res2, err := client.FTSearch(ctx, "idx1", "Jon").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res2.Total).To(BeEquivalentTo(int64(2))) + names := []interface{}{res2.Docs[0].Fields["name"], res2.Docs[1].Fields["name"]} + Expect(names).To(ContainElement("Jon")) + Expect(names).To(ContainElement("John")) + }) + + It("should FTSearch WithScores", Label("search", "ftsearch"), func() { + text1 := &redis.FieldSchema{FieldName: "description", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "description", "The quick brown fox jumps over the lazy dog") + client.HSet(ctx, "doc2", "description", "Quick alice was beginning to get very tired of sitting by her quick sister on the bank, and of having nothing to do.") + + res, err := client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeEquivalentTo(float64(1))) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true, Scorer: "TFIDF"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeEquivalentTo(float64(1))) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true, Scorer: "TFIDF.DOCNORM"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeEquivalentTo(0.14285714285714285)) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true, Scorer: "BM25"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeNumerically("<=", 0.22471909420069797)) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true, Scorer: "DISMAX"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeEquivalentTo(float64(2))) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true, Scorer: "DOCSCORE"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeEquivalentTo(float64(1))) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "quick", &redis.FTSearchOptions{WithScores: true, Scorer: "HAMMING"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(*res.Docs[0].Score).To(BeEquivalentTo(float64(0))) + }) + + It("should FTConfigSet and FTConfigGet ", Label("search", "ftconfigget", "ftconfigset", "NonRedisEnterprise"), func() { + val, err := client.FTConfigSet(ctx, "TIMEOUT", "100").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + + res, err := client.FTConfigGet(ctx, "*").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res["TIMEOUT"]).To(BeEquivalentTo("100")) + + res, err = client.FTConfigGet(ctx, "TIMEOUT").Result() + 
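// --- Illustrative sketch, not part of the diff: requesting relevance scores and
// choosing a scorer through FTSearchOptions, as the WithScores assertions above do.
// The server address, index and key names are assumptions made for this example,
// as is the github.com/redis/go-redis/v9 import path.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	if err := rdb.FTCreate(ctx, "docs", &redis.FTCreateOptions{},
		&redis.FieldSchema{FieldName: "description", FieldType: redis.SearchFieldTypeText}).Err(); err != nil {
		panic(err)
	}
	rdb.HSet(ctx, "doc:1", "description", "the quick brown fox")

	// WithScores makes Docs[i].Score non-nil; Scorer selects the ranking function.
	res, err := rdb.FTSearchWithArgs(ctx, "docs", "quick",
		&redis.FTSearchOptions{WithScores: true, Scorer: "TFIDF"}).Result()
	if err != nil {
		panic(err)
	}
	for _, doc := range res.Docs {
		fmt.Printf("%s score=%v\n", doc.ID, *doc.Score)
	}
}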
Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(map[string]interface{}{"TIMEOUT": "100"})) + + }) + + It("should FTAggregate GroupBy ", Label("search", "ftaggregate"), func() { + text1 := &redis.FieldSchema{FieldName: "title", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "body", FieldType: redis.SearchFieldTypeText} + text3 := &redis.FieldSchema{FieldName: "parent", FieldType: redis.SearchFieldTypeText} + num := &redis.FieldSchema{FieldName: "random_num", FieldType: redis.SearchFieldTypeNumeric} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, text2, text3, num).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "search", "title", "RediSearch", + "body", "Redisearch impements a search engine on top of redis", + "parent", "redis", + "random_num", 10) + client.HSet(ctx, "ai", "title", "RedisAI", + "body", "RedisAI executes Deep Learning/Machine Learning models and managing their data.", + "parent", "redis", + "random_num", 3) + client.HSet(ctx, "json", "title", "RedisJson", + "body", "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.", + "parent", "redis", + "random_num", 8) + + reducer := redis.FTAggregateReducer{Reducer: redis.SearchCount} + options := &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err := client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliascount"]).To(BeEquivalentTo("3")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchCountDistinct, Args: []interface{}{"@title"}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliascount_distincttitle"]).To(BeEquivalentTo("3")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchSum, Args: []interface{}{"@random_num"}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliassumrandom_num"]).To(BeEquivalentTo("21")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchMin, Args: []interface{}{"@random_num"}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliasminrandom_num"]).To(BeEquivalentTo("3")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchMax, Args: []interface{}{"@random_num"}} + options = &redis.FTAggregateOptions{GroupBy: 
[]redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliasmaxrandom_num"]).To(BeEquivalentTo("10")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchAvg, Args: []interface{}{"@random_num"}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliasavgrandom_num"]).To(BeEquivalentTo("7")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchStdDev, Args: []interface{}{"@random_num"}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliasstddevrandom_num"]).To(BeEquivalentTo("3.60555127546")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchQuantile, Args: []interface{}{"@random_num", 0.5}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliasquantilerandom_num,0.5"]).To(BeEquivalentTo("8")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchToList, Args: []interface{}{"@title"}} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["__generated_aliastolisttitle"]).To(ContainElements("RediSearch", "RedisAI", "RedisJson")) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchFirstValue, Args: []interface{}{"@title"}, As: "first"} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["first"]).To(Or(BeEquivalentTo("RediSearch"), BeEquivalentTo("RedisAI"), BeEquivalentTo("RedisJson"))) + + reducer = redis.FTAggregateReducer{Reducer: redis.SearchRandomSample, Args: []interface{}{"@title", 2}, As: "random"} + options = &redis.FTAggregateOptions{GroupBy: []redis.FTAggregateGroupBy{{Fields: []interface{}{"@parent"}, Reduce: []redis.FTAggregateReducer{reducer}}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "redis", options).Result() + Expect(err).NotTo(HaveOccurred()) + 
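// --- Illustrative sketch, not part of the diff: a GROUPBY/REDUCE aggregation like
// the ones asserted above, summing a numeric field per group and naming the result
// with As. The index name, key names and server address are assumptions for this
// example; the github.com/redis/go-redis/v9 import path is assumed as well.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.FTCreate(ctx, "modules", &redis.FTCreateOptions{},
		&redis.FieldSchema{FieldName: "parent", FieldType: redis.SearchFieldTypeText},
		&redis.FieldSchema{FieldName: "size", FieldType: redis.SearchFieldTypeNumeric})
	rdb.HSet(ctx, "m:1", "parent", "redis", "size", 10)
	rdb.HSet(ctx, "m:2", "parent", "redis", "size", 3)

	// GROUPBY 1 @parent REDUCE SUM 1 @size AS total
	reducer := redis.FTAggregateReducer{Reducer: redis.SearchSum, Args: []interface{}{"@size"}, As: "total"}
	opts := &redis.FTAggregateOptions{
		GroupBy: []redis.FTAggregateGroupBy{{
			Fields: []interface{}{"@parent"},
			Reduce: []redis.FTAggregateReducer{reducer},
		}},
	}
	res, err := rdb.FTAggregateWithArgs(ctx, "modules", "*", opts).Result()
	if err != nil {
		panic(err)
	}
	for _, row := range res.Rows {
		fmt.Println(row.Fields["parent"], row.Fields["total"])
	}
}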
Expect(res.Rows[0].Fields["parent"]).To(BeEquivalentTo("redis")) + Expect(res.Rows[0].Fields["random"]).To(Or( + ContainElement("RediSearch"), + ContainElement("RedisAI"), + ContainElement("RedisJson"), + )) + + }) + + It("should FTAggregate sort and limit", Label("search", "ftaggregate"), func() { + text1 := &redis.FieldSchema{FieldName: "t1", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "t2", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "t1", "a", "t2", "b") + client.HSet(ctx, "doc2", "t1", "b", "t2", "a") + + options := &redis.FTAggregateOptions{SortBy: []redis.FTAggregateSortBy{{FieldName: "@t2", Asc: true}, {FieldName: "@t1", Desc: true}}} + res, err := client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t1"]).To(BeEquivalentTo("b")) + Expect(res.Rows[1].Fields["t1"]).To(BeEquivalentTo("a")) + Expect(res.Rows[0].Fields["t2"]).To(BeEquivalentTo("a")) + Expect(res.Rows[1].Fields["t2"]).To(BeEquivalentTo("b")) + + options = &redis.FTAggregateOptions{SortBy: []redis.FTAggregateSortBy{{FieldName: "@t1"}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t1"]).To(BeEquivalentTo("a")) + Expect(res.Rows[1].Fields["t1"]).To(BeEquivalentTo("b")) + + options = &redis.FTAggregateOptions{SortBy: []redis.FTAggregateSortBy{{FieldName: "@t1"}}, SortByMax: 1} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t1"]).To(BeEquivalentTo("a")) + + options = &redis.FTAggregateOptions{SortBy: []redis.FTAggregateSortBy{{FieldName: "@t1"}}, Limit: 1, LimitOffset: 1} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t1"]).To(BeEquivalentTo("b")) + }) + + It("should FTAggregate load ", Label("search", "ftaggregate"), func() { + text1 := &redis.FieldSchema{FieldName: "t1", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "t2", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "t1", "hello", "t2", "world") + + options := &redis.FTAggregateOptions{Load: []redis.FTAggregateLoad{{Field: "t1"}}} + res, err := client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t1"]).To(BeEquivalentTo("hello")) + + options = &redis.FTAggregateOptions{Load: []redis.FTAggregateLoad{{Field: "t2"}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t2"]).To(BeEquivalentTo("world")) + + options = &redis.FTAggregateOptions{LoadAll: true} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["t1"]).To(BeEquivalentTo("hello")) + Expect(res.Rows[0].Fields["t2"]).To(BeEquivalentTo("world")) + }) + + It("should FTAggregate apply", Label("search", 
"ftaggregate"), func() { + text1 := &redis.FieldSchema{FieldName: "PrimaryKey", FieldType: redis.SearchFieldTypeText, Sortable: true} + num1 := &redis.FieldSchema{FieldName: "CreatedDateTimeUTC", FieldType: redis.SearchFieldTypeNumeric, Sortable: true} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, num1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "PrimaryKey", "9::362330", "CreatedDateTimeUTC", "637387878524969984") + client.HSet(ctx, "doc2", "PrimaryKey", "9::362329", "CreatedDateTimeUTC", "637387875859270016") + + options := &redis.FTAggregateOptions{Apply: []redis.FTAggregateApply{{Field: "@CreatedDateTimeUTC * 10", As: "CreatedDateTimeUTC"}}} + res, err := client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Rows[0].Fields["CreatedDateTimeUTC"]).To(Or(BeEquivalentTo("6373878785249699840"), BeEquivalentTo("6373878758592700416"))) + Expect(res.Rows[1].Fields["CreatedDateTimeUTC"]).To(Or(BeEquivalentTo("6373878785249699840"), BeEquivalentTo("6373878758592700416"))) + + }) + + It("should FTAggregate filter", Label("search", "ftaggregate"), func() { + text1 := &redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText, Sortable: true} + num1 := &redis.FieldSchema{FieldName: "age", FieldType: redis.SearchFieldTypeNumeric, Sortable: true} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, text1, num1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "name", "bar", "age", "25") + client.HSet(ctx, "doc2", "name", "foo", "age", "19") + + for _, dlc := range []int{1, 2} { + options := &redis.FTAggregateOptions{Filter: "@name=='foo' && @age < 20", DialectVersion: dlc} + res, err := client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(Or(BeEquivalentTo(2), BeEquivalentTo(1))) + Expect(res.Rows[0].Fields["name"]).To(BeEquivalentTo("foo")) + + options = &redis.FTAggregateOptions{Filter: "@age > 15", DialectVersion: dlc, SortBy: []redis.FTAggregateSortBy{{FieldName: "@age"}}} + res, err = client.FTAggregateWithArgs(ctx, "idx1", "*", options).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(2)) + Expect(res.Rows[0].Fields["age"]).To(BeEquivalentTo("19")) + Expect(res.Rows[1].Fields["age"]).To(BeEquivalentTo("25")) + } + + }) + + It("should FTSearch SkipInitalScan", Label("search", "ftsearch"), func() { + client.HSet(ctx, "doc1", "foo", "bar") + + text1 := &redis.FieldSchema{FieldName: "foo", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{SkipInitalScan: true}, text1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + res, err := client.FTSearch(ctx, "idx1", "@foo:bar").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(int64(0))) + }) + + It("should FTCreate json", Label("search", "ftcreate"), func() { + + text1 := &redis.FieldSchema{FieldName: "$.name", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnJSON: true, Prefix: []interface{}{"king:"}}, text1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + 
+ client.JSONSet(ctx, "king:1", "$", `{"name": "henry"}`) + client.JSONSet(ctx, "king:2", "$", `{"name": "james"}`) + + res, err := client.FTSearch(ctx, "idx1", "henry").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("king:1")) + Expect(res.Docs[0].Fields["$"]).To(BeEquivalentTo(`{"name":"henry"}`)) + }) + + It("should FTCreate json fields as names", Label("search", "ftcreate"), func() { + + text1 := &redis.FieldSchema{FieldName: "$.name", FieldType: redis.SearchFieldTypeText, As: "name"} + num1 := &redis.FieldSchema{FieldName: "$.age", FieldType: redis.SearchFieldTypeNumeric, As: "just_a_number"} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnJSON: true}, text1, num1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.JSONSet(ctx, "doc:1", "$", `{"name": "Jon", "age": 25}`) + + res, err := client.FTSearchWithArgs(ctx, "idx1", "Jon", &redis.FTSearchOptions{Return: []redis.FTSearchReturn{{FieldName: "name"}, {FieldName: "just_a_number"}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("doc:1")) + Expect(res.Docs[0].Fields["name"]).To(BeEquivalentTo("Jon")) + Expect(res.Docs[0].Fields["just_a_number"]).To(BeEquivalentTo("25")) + }) + + It("should FTCreate CaseSensitive", Label("search", "ftcreate"), func() { + + tag1 := &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeTag, CaseSensitive: false} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, tag1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "1", "t", "HELLO") + client.HSet(ctx, "2", "t", "hello") + + res, err := client.FTSearch(ctx, "idx1", "@t:{HELLO}").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(2)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("1")) + Expect(res.Docs[1].ID).To(BeEquivalentTo("2")) + + resDrop, err := client.FTDropIndex(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDrop).To(BeEquivalentTo("OK")) + + tag2 := &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeTag, CaseSensitive: true} + val, err = client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, tag2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + res, err = client.FTSearch(ctx, "idx1", "@t:{HELLO}").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("1")) + + }) + + It("should FTSearch ReturnFields", Label("search", "ftsearch"), func() { + resJson, err := client.JSONSet(ctx, "doc:1", "$", `{"t": "riceratops","t2": "telmatosaurus", "n": 9072, "flt": 97.2}`).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resJson).To(BeEquivalentTo("OK")) + + text1 := &redis.FieldSchema{FieldName: "$.t", FieldType: redis.SearchFieldTypeText} + num1 := &redis.FieldSchema{FieldName: "$.flt", FieldType: redis.SearchFieldTypeNumeric} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnJSON: true}, text1, num1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + res, err := client.FTSearchWithArgs(ctx, "idx1", "*", &redis.FTSearchOptions{Return: []redis.FTSearchReturn{{FieldName: 
"$.t", As: "txt"}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("doc:1")) + Expect(res.Docs[0].Fields["txt"]).To(BeEquivalentTo("riceratops")) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "*", &redis.FTSearchOptions{Return: []redis.FTSearchReturn{{FieldName: "$.t2", As: "txt"}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("doc:1")) + Expect(res.Docs[0].Fields["txt"]).To(BeEquivalentTo("telmatosaurus")) + }) + + It("should FTSynUpdate", Label("search", "ftsynupdate"), func() { + + text1 := &redis.FieldSchema{FieldName: "title", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "body", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnHash: true}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + resSynUpdate, err := client.FTSynUpdateWithArgs(ctx, "idx1", "id1", &redis.FTSynUpdateOptions{SkipInitialScan: true}, []interface{}{"boy", "child", "offspring"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSynUpdate).To(BeEquivalentTo("OK")) + client.HSet(ctx, "doc1", "title", "he is a baby", "body", "this is a test") + + resSynUpdate, err = client.FTSynUpdateWithArgs(ctx, "idx1", "id1", &redis.FTSynUpdateOptions{SkipInitialScan: true}, []interface{}{"baby"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSynUpdate).To(BeEquivalentTo("OK")) + client.HSet(ctx, "doc2", "title", "he is another baby", "body", "another test") + + res, err := client.FTSearchWithArgs(ctx, "idx1", "child", &redis.FTSearchOptions{Expander: "SYNONYM"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Docs[0].ID).To(BeEquivalentTo("doc2")) + Expect(res.Docs[0].Fields["title"]).To(BeEquivalentTo("he is another baby")) + Expect(res.Docs[0].Fields["body"]).To(BeEquivalentTo("another test")) + }) + + It("should FTSynDump", Label("search", "ftsyndump"), func() { + + text1 := &redis.FieldSchema{FieldName: "title", FieldType: redis.SearchFieldTypeText} + text2 := &redis.FieldSchema{FieldName: "body", FieldType: redis.SearchFieldTypeText} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnHash: true}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + resSynUpdate, err := client.FTSynUpdate(ctx, "idx1", "id1", []interface{}{"boy", "child", "offspring"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSynUpdate).To(BeEquivalentTo("OK")) + + resSynUpdate, err = client.FTSynUpdate(ctx, "idx1", "id1", []interface{}{"baby", "child"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSynUpdate).To(BeEquivalentTo("OK")) + + resSynUpdate, err = client.FTSynUpdate(ctx, "idx1", "id1", []interface{}{"tree", "wood"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSynUpdate).To(BeEquivalentTo("OK")) + + resSynDump, err := client.FTSynDump(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resSynDump[0].Term).To(BeEquivalentTo("baby")) + Expect(resSynDump[0].Synonyms).To(BeEquivalentTo([]string{"id1"})) + Expect(resSynDump[1].Term).To(BeEquivalentTo("wood")) + Expect(resSynDump[1].Synonyms).To(BeEquivalentTo([]string{"id1"})) + Expect(resSynDump[2].Term).To(BeEquivalentTo("boy")) + 
Expect(resSynDump[2].Synonyms).To(BeEquivalentTo([]string{"id1"})) + Expect(resSynDump[3].Term).To(BeEquivalentTo("tree")) + Expect(resSynDump[3].Synonyms).To(BeEquivalentTo([]string{"id1"})) + Expect(resSynDump[4].Term).To(BeEquivalentTo("child")) + Expect(resSynDump[4].Synonyms).To(Or(BeEquivalentTo([]string{"id1"}), BeEquivalentTo([]string{"id1", "id1"}))) + Expect(resSynDump[5].Term).To(BeEquivalentTo("offspring")) + Expect(resSynDump[5].Synonyms).To(BeEquivalentTo([]string{"id1"})) + + }) + + It("should FTCreate json with alias", Label("search", "ftcreate"), func() { + + text1 := &redis.FieldSchema{FieldName: "$.name", FieldType: redis.SearchFieldTypeText, As: "name"} + num1 := &redis.FieldSchema{FieldName: "$.num", FieldType: redis.SearchFieldTypeNumeric, As: "num"} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnJSON: true, Prefix: []interface{}{"king:"}}, text1, num1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.JSONSet(ctx, "king:1", "$", `{"name": "henry", "num": 42}`) + client.JSONSet(ctx, "king:2", "$", `{"name": "james", "num": 3.14}`) + + res, err := client.FTSearch(ctx, "idx1", "@name:henry").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("king:1")) + Expect(res.Docs[0].Fields["$"]).To(BeEquivalentTo(`{"name":"henry","num":42}`)) + + res, err = client.FTSearch(ctx, "idx1", "@num:[0 10]").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("king:2")) + Expect(res.Docs[0].Fields["$"]).To(BeEquivalentTo(`{"name":"james","num":3.14}`)) + }) + + It("should FTCreate json with multipath", Label("search", "ftcreate"), func() { + + tag1 := &redis.FieldSchema{FieldName: "$..name", FieldType: redis.SearchFieldTypeTag, As: "name"} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnJSON: true, Prefix: []interface{}{"king:"}}, tag1).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.JSONSet(ctx, "king:1", "$", `{"name": "henry", "country": {"name": "england"}}`) + + res, err := client.FTSearch(ctx, "idx1", "@name:{england}").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("king:1")) + Expect(res.Docs[0].Fields["$"]).To(BeEquivalentTo(`{"name":"henry","country":{"name":"england"}}`)) + }) + + It("should FTCreate json with jsonpath", Label("search", "ftcreate"), func() { + + text1 := &redis.FieldSchema{FieldName: `$["prod:name"]`, FieldType: redis.SearchFieldTypeText, As: "name"} + text2 := &redis.FieldSchema{FieldName: `$.prod:name`, FieldType: redis.SearchFieldTypeText, As: "name_unsupported"} + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{OnJSON: true}, text1, text2).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.JSONSet(ctx, "doc:1", "$", `{"prod:name": "RediSearch"}`) + + res, err := client.FTSearch(ctx, "idx1", "@name:RediSearch").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("doc:1")) + Expect(res.Docs[0].Fields["$"]).To(BeEquivalentTo(`{"prod:name":"RediSearch"}`)) + + res, err = client.FTSearch(ctx, "idx1", "@name_unsupported:RediSearch").Result() + 
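// --- Illustrative sketch, not part of the diff: indexing JSON documents with
// aliased JSONPath attributes and projecting them back with RETURN, as the
// JSON-alias tests above do. The index name, key names and server address are
// assumptions for this example; the github.com/redis/go-redis/v9 import path is assumed.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.FTCreate(ctx, "users", &redis.FTCreateOptions{OnJSON: true, Prefix: []interface{}{"user:"}},
		&redis.FieldSchema{FieldName: "$.name", FieldType: redis.SearchFieldTypeText, As: "name"},
		&redis.FieldSchema{FieldName: "$.age", FieldType: redis.SearchFieldTypeNumeric, As: "age"})
	rdb.JSONSet(ctx, "user:1", "$", `{"name": "Jon", "age": 25}`)

	// Query by alias and return only the aliased attributes instead of the raw "$" payload.
	res, err := rdb.FTSearchWithArgs(ctx, "users", "@name:Jon",
		&redis.FTSearchOptions{Return: []redis.FTSearchReturn{{FieldName: "name"}, {FieldName: "age"}}}).Result()
	if err != nil {
		panic(err)
	}
	for _, doc := range res.Docs {
		fmt.Println(doc.ID, doc.Fields["name"], doc.Fields["age"])
	}
}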
Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + + res, err = client.FTSearchWithArgs(ctx, "idx1", "@name:RediSearch", &redis.FTSearchOptions{Return: []redis.FTSearchReturn{{FieldName: "name"}}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Total).To(BeEquivalentTo(1)) + Expect(res.Docs[0].ID).To(BeEquivalentTo("doc:1")) + Expect(res.Docs[0].Fields["name"]).To(BeEquivalentTo("RediSearch")) + + }) + + It("should FTCreate VECTOR", Label("search", "ftcreate"), func() { + hnswOptions := &redis.FTHNSWOptions{Type: "FLOAT32", Dim: 2, DistanceMetric: "L2"} + val, err := client.FTCreate(ctx, "idx1", + &redis.FTCreateOptions{}, + &redis.FieldSchema{FieldName: "v", FieldType: redis.SearchFieldTypeVector, VectorArgs: &redis.FTVectorArgs{HNSWOptions: hnswOptions}}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "a", "v", "aaaaaaaa") + client.HSet(ctx, "b", "v", "aaaabaaa") + client.HSet(ctx, "c", "v", "aaaaabaa") + + searchOptions := &redis.FTSearchOptions{ + Return: []redis.FTSearchReturn{{FieldName: "__v_score"}}, + SortBy: []redis.FTSearchSortBy{{FieldName: "__v_score", Asc: true}}, + DialectVersion: 2, + Params: map[string]interface{}{"vec": "aaaaaaaa"}, + } + res, err := client.FTSearchWithArgs(ctx, "idx1", "*=>[KNN 2 @v $vec]", searchOptions).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Docs[0].ID).To(BeEquivalentTo("a")) + Expect(res.Docs[0].Fields["__v_score"]).To(BeEquivalentTo("0")) + }) + + It("should FTCreate and FTSearch text params", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "name", FieldType: redis.SearchFieldTypeText}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "name", "Alice") + client.HSet(ctx, "doc2", "name", "Bob") + client.HSet(ctx, "doc3", "name", "Carol") + + res1, err := client.FTSearchWithArgs(ctx, "idx1", "@name:($name1 | $name2 )", &redis.FTSearchOptions{Params: map[string]interface{}{"name1": "Alice", "name2": "Bob"}, DialectVersion: 2}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(2))) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("doc1")) + Expect(res1.Docs[1].ID).To(BeEquivalentTo("doc2")) + + }) + + It("should FTCreate and FTSearch numeric params", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "numval", FieldType: redis.SearchFieldTypeNumeric}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "numval", 101) + client.HSet(ctx, "doc2", "numval", 102) + client.HSet(ctx, "doc3", "numval", 103) + + res1, err := client.FTSearchWithArgs(ctx, "idx1", "@numval:[$min $max]", &redis.FTSearchOptions{Params: map[string]interface{}{"min": 101, "max": 102}, DialectVersion: 2}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(2))) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("doc1")) + Expect(res1.Docs[1].ID).To(BeEquivalentTo("doc2")) + + }) + + It("should FTCreate and FTSearch geo params", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "g", FieldType: 
redis.SearchFieldTypeGeo}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "doc1", "g", "29.69465, 34.95126") + client.HSet(ctx, "doc2", "g", "29.69350, 34.94737") + client.HSet(ctx, "doc3", "g", "29.68746, 34.94882") + + res1, err := client.FTSearchWithArgs(ctx, "idx1", "@g:[$lon $lat $radius $units]", &redis.FTSearchOptions{Params: map[string]interface{}{"lat": "34.95126", "lon": "29.69465", "radius": 1000, "units": "km"}, DialectVersion: 2}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(3))) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("doc1")) + Expect(res1.Docs[1].ID).To(BeEquivalentTo("doc2")) + Expect(res1.Docs[2].ID).To(BeEquivalentTo("doc3")) + + }) + + It("should FTConfigSet and FTConfigGet dialect", Label("search", "ftconfigget", "ftconfigset", "NonRedisEnterprise"), func() { + res, err := client.FTConfigSet(ctx, "DEFAULT_DIALECT", "1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo("OK")) + + defDialect, err := client.FTConfigGet(ctx, "DEFAULT_DIALECT").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(defDialect).To(BeEquivalentTo(map[string]interface{}{"DEFAULT_DIALECT": "1"})) + + res, err = client.FTConfigSet(ctx, "DEFAULT_DIALECT", "2").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo("OK")) + + defDialect, err = client.FTConfigGet(ctx, "DEFAULT_DIALECT").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(defDialect).To(BeEquivalentTo(map[string]interface{}{"DEFAULT_DIALECT": "2"})) + }) + + It("should FTCreate WithSuffixtrie", Label("search", "ftcreate", "ftinfo"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + res, err := client.FTInfo(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Attributes[0].Attribute).To(BeEquivalentTo("txt")) + + resDrop, err := client.FTDropIndex(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDrop).To(BeEquivalentTo("OK")) + + // create withsuffixtrie index - text field + val, err = client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "txt", FieldType: redis.SearchFieldTypeText, WithSuffixtrie: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + res, err = client.FTInfo(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Attributes[0].WithSuffixtrie).To(BeTrue()) + + resDrop, err = client.FTDropIndex(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resDrop).To(BeEquivalentTo("OK")) + + // create withsuffixtrie index - tag field + val, err = client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeTag, WithSuffixtrie: true}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + res, err = client.FTInfo(ctx, "idx1").Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res.Attributes[0].WithSuffixtrie).To(BeTrue()) + }) + + It("should FTCreate GeoShape", Label("search", "ftcreate", "ftsearch"), func() { + val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "geom", 
FieldType: redis.SearchFieldTypeGeoShape, GeoShapeFieldType: "FLAT"}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(BeEquivalentTo("OK")) + WaitForIndexing(client, "idx1") + + client.HSet(ctx, "small", "geom", "POLYGON((1 1, 1 100, 100 100, 100 1, 1 1))") + client.HSet(ctx, "large", "geom", "POLYGON((1 1, 1 200, 200 200, 200 1, 1 1))") + + res1, err := client.FTSearchWithArgs(ctx, "idx1", "@geom:[WITHIN $poly]", + &redis.FTSearchOptions{ + DialectVersion: 3, + Params: map[string]interface{}{"poly": "POLYGON((0 0, 0 150, 150 150, 150 0, 0 0))"}, + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res1.Total).To(BeEquivalentTo(int64(1))) + Expect(res1.Docs[0].ID).To(BeEquivalentTo("small")) + + res2, err := client.FTSearchWithArgs(ctx, "idx1", "@geom:[CONTAINS $poly]", + &redis.FTSearchOptions{ + DialectVersion: 3, + Params: map[string]interface{}{"poly": "POLYGON((2 2, 2 50, 50 50, 50 2, 2 2))"}, + }).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res2.Total).To(BeEquivalentTo(int64(2))) + }) +}) + +// It("should FTProfile Search and Aggregate", Label("search", "ftprofile"), func() { +// val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeText}).Result() +// Expect(err).NotTo(HaveOccurred()) +// Expect(val).To(BeEquivalentTo("OK")) +// WaitForIndexing(client, "idx1") + +// client.HSet(ctx, "1", "t", "hello") +// client.HSet(ctx, "2", "t", "world") + +// // FTProfile Search +// query := redis.FTSearchQuery("hello|world", &redis.FTSearchOptions{NoContent: true}) +// res1, err := client.FTProfile(ctx, "idx1", false, query).Result() +// Expect(err).NotTo(HaveOccurred()) +// panic(res1) +// Expect(len(res1["results"].([]interface{}))).To(BeEquivalentTo(3)) +// resProfile := res1["profile"].(map[interface{}]interface{}) +// Expect(resProfile["Parsing time"].(float64) < 0.5).To(BeTrue()) +// iterProfile0 := resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{}) +// Expect(iterProfile0["Counter"]).To(BeEquivalentTo(2.0)) +// Expect(iterProfile0["Type"]).To(BeEquivalentTo("UNION")) + +// // FTProfile Aggregate +// aggQuery := redis.FTAggregateQuery("*", &redis.FTAggregateOptions{ +// Load: []redis.FTAggregateLoad{{Field: "t"}}, +// Apply: []redis.FTAggregateApply{{Field: "startswith(@t, 'hel')", As: "prefix"}}}) +// res2, err := client.FTProfile(ctx, "idx1", false, aggQuery).Result() +// Expect(err).NotTo(HaveOccurred()) +// Expect(len(res2["results"].([]interface{}))).To(BeEquivalentTo(2)) +// resProfile = res2["profile"].(map[interface{}]interface{}) +// iterProfile0 = resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{}) +// Expect(iterProfile0["Counter"]).To(BeEquivalentTo(2)) +// Expect(iterProfile0["Type"]).To(BeEquivalentTo("WILDCARD")) +// }) + +// It("should FTProfile Search Limited", Label("search", "ftprofile"), func() { +// val, err := client.FTCreate(ctx, "idx1", &redis.FTCreateOptions{}, &redis.FieldSchema{FieldName: "t", FieldType: redis.SearchFieldTypeText}).Result() +// Expect(err).NotTo(HaveOccurred()) +// Expect(val).To(BeEquivalentTo("OK")) +// WaitForIndexing(client, "idx1") + +// client.HSet(ctx, "1", "t", "hello") +// client.HSet(ctx, "2", "t", "hell") +// client.HSet(ctx, "3", "t", "help") +// client.HSet(ctx, "4", "t", "helowa") + +// // FTProfile Search +// query := redis.FTSearchQuery("%hell% hel*", &redis.FTSearchOptions{}) +// res1, err := client.FTProfile(ctx, "idx1", true, query).Result() +// 
Expect(err).NotTo(HaveOccurred()) +// resProfile := res1["profile"].(map[interface{}]interface{}) +// iterProfile0 := resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{}) +// Expect(iterProfile0["Type"]).To(BeEquivalentTo("INTERSECT")) +// Expect(len(res1["results"].([]interface{}))).To(BeEquivalentTo(3)) +// Expect(iterProfile0["Child iterators"].([]interface{})[0].(map[interface{}]interface{})["Child iterators"]).To(BeEquivalentTo("The number of iterators in the union is 3")) +// Expect(iterProfile0["Child iterators"].([]interface{})[1].(map[interface{}]interface{})["Child iterators"]).To(BeEquivalentTo("The number of iterators in the union is 4")) +// }) + +// It("should FTProfile Search query params", Label("search", "ftprofile"), func() { +// hnswOptions := &redis.FTHNSWOptions{Type: "FLOAT32", Dim: 2, DistanceMetric: "L2"} +// val, err := client.FTCreate(ctx, "idx1", +// &redis.FTCreateOptions{}, +// &redis.FieldSchema{FieldName: "v", FieldType: redis.SearchFieldTypeVector, VectorArgs: &redis.FTVectorArgs{HNSWOptions: hnswOptions}}).Result() +// Expect(err).NotTo(HaveOccurred()) +// Expect(val).To(BeEquivalentTo("OK")) +// WaitForIndexing(client, "idx1") + +// client.HSet(ctx, "a", "v", "aaaaaaaa") +// client.HSet(ctx, "b", "v", "aaaabaaa") +// client.HSet(ctx, "c", "v", "aaaaabaa") + +// // FTProfile Search +// searchOptions := &redis.FTSearchOptions{ +// Return: []redis.FTSearchReturn{{FieldName: "__v_score"}}, +// SortBy: []redis.FTSearchSortBy{{FieldName: "__v_score", Asc: true}}, +// DialectVersion: 2, +// Params: map[string]interface{}{"vec": "aaaaaaaa"}, +// } +// query := redis.FTSearchQuery("*=>[KNN 2 @v $vec]", searchOptions) +// res1, err := client.FTProfile(ctx, "idx1", false, query).Result() +// Expect(err).NotTo(HaveOccurred()) +// resProfile := res1["profile"].(map[interface{}]interface{}) +// iterProfile0 := resProfile["Iterators profile"].([]interface{})[0].(map[interface{}]interface{}) +// Expect(iterProfile0["Counter"]).To(BeEquivalentTo(2)) +// Expect(iterProfile0["Type"]).To(BeEquivalentTo(redis.SearchFieldTypeVector.String())) +// Expect(res1["total_results"]).To(BeEquivalentTo(2)) +// results0 := res1["results"].([]interface{})[0].(map[interface{}]interface{}) +// Expect(results0["id"]).To(BeEquivalentTo("a")) +// Expect(results0["extra_attributes"].(map[interface{}]interface{})["__v_score"]).To(BeEquivalentTo("0")) +// }) diff --git a/stream_commands.go b/stream_commands.go index cc1a6614..65e8745a 100644 --- a/stream_commands.go +++ b/stream_commands.go @@ -137,10 +137,11 @@ type XReadArgs struct { Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 Count int64 Block time.Duration + ID string } func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { - args := make([]interface{}, 0, 6+len(a.Streams)) + args := make([]interface{}, 0, 2*len(a.Streams)+6) args = append(args, "xread") keyPos := int8(1) @@ -159,6 +160,11 @@ func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { for _, s := range a.Streams { args = append(args, s) } + if a.ID != "" { + for range a.Streams { + args = append(args, a.ID) + } + } cmd := NewXStreamSliceCmd(ctx, args...) 
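// --- Illustrative sketch, not part of the diff hunk itself: using the new
// XReadArgs.ID field added above. With ID set, Streams carries only stream names
// and the same ID is appended once per stream. Stream names and the server
// address are assumptions for this example; github.com/redis/go-redis/v9 is the assumed import path.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.XAdd(ctx, &redis.XAddArgs{Stream: "events:a", Values: map[string]interface{}{"n": 1}})
	rdb.XAdd(ctx, &redis.XAddArgs{Stream: "events:b", Values: map[string]interface{}{"n": 2}})

	// Roughly: XREAD COUNT 10 BLOCK 100 STREAMS events:a events:b 0 0
	streams, err := rdb.XRead(ctx, &redis.XReadArgs{
		Streams: []string{"events:a", "events:b"},
		ID:      "0", // read every listed stream from the beginning
		Count:   10,
		Block:   100 * time.Millisecond,
	}).Result()
	if err != nil {
		panic(err)
	}
	for _, s := range streams {
		for _, msg := range s.Messages {
			fmt.Println(s.Stream, msg.ID, msg.Values)
		}
	}
}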
if a.Block >= 0 { diff --git a/timeseries_commands.go b/timeseries_commands.go index c6c0ec87..b7cf6360 100644 --- a/timeseries_commands.go +++ b/timeseries_commands.go @@ -40,25 +40,32 @@ type TimeseriesCmdable interface { } type TSOptions struct { - Retention int - ChunkSize int - Encoding string - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + Encoding string + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSIncrDecrOptions struct { - Timestamp int64 - Retention int - ChunkSize int - Uncompressed bool - Labels map[string]string + Timestamp int64 + Retention int + ChunkSize int + Uncompressed bool + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSAlterOptions struct { - Retention int - ChunkSize int - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSCreateRuleOptions struct { @@ -223,6 +230,9 @@ func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interf args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -264,6 +274,9 @@ func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOp args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -292,6 +305,9 @@ func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOption args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -351,12 +367,18 @@ func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -391,12 +413,18 @@ func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) 
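// --- Illustrative sketch, not part of the diff hunk itself: the IGNORE insertion
// filter wired up above via IgnoreMaxTimeDiff / IgnoreMaxValDiff, which the tests
// below pair with DUPLICATE_POLICY LAST so that near-duplicate samples are dropped
// by the server. The key name and server address are assumptions for this example;
// github.com/redis/go-redis/v9 is the assumed import path.
package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	opts := &redis.TSOptions{
		DuplicatePolicy:   "LAST",
		IgnoreMaxTimeDiff: 5,    // drop a sample within 5 time units of the last accepted one ...
		IgnoreMaxValDiff:  10.0, // ... when its value also differs by 10 or less
	}
	if err := rdb.TSCreateWithArgs(ctx, "sensor:1", opts).Err(); err != nil {
		panic(err)
	}

	rdb.TSAdd(ctx, "sensor:1", 1000, 1.0)
	rdb.TSAdd(ctx, "sensor:1", 1003, 2.0) // time diff 3 and value diff 1: ignored, last sample stays 1000/1.0

	points, err := rdb.TSRange(ctx, "sensor:1", 1000, 1010).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(points) // expected to contain only the 1000/1.0 sample
}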
_ = c(ctx, cmd) diff --git a/timeseries_commands_test.go b/timeseries_commands_test.go index 563f24e7..c62367a7 100644 --- a/timeseries_commands_test.go +++ b/timeseries_commands_test.go @@ -23,7 +23,7 @@ var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { Expect(client.Close()).NotTo(HaveOccurred()) }) - It("should TSCreate and TSCreateWithArgs", Label("timeseries", "tscreate", "tscreateWithArgs"), func() { + It("should TSCreate and TSCreateWithArgs", Label("timeseries", "tscreate", "tscreateWithArgs", "NonRedisEnterprise"), func() { result, err := client.TSCreate(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) @@ -62,10 +62,60 @@ var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { resultInfo, err = client.TSInfo(ctx, keyName).Result() Expect(err).NotTo(HaveOccurred()) Expect(strings.ToUpper(resultInfo["duplicatePolicy"].(string))).To(BeEquivalentTo(dup)) - } + // Test insertion filters + opt = &redis.TSOptions{IgnoreMaxTimeDiff: 5, DuplicatePolicy: "LAST", IgnoreMaxValDiff: 10.0} + result, err = client.TSCreateWithArgs(ctx, "ts-if-1", opt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo("OK")) + resultAdd, err := client.TSAdd(ctx, "ts-if-1", 1000, 1.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1000)) + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1010, 11.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1010)) + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1013, 10.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1010)) + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1020, 11.5).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1020)) + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1021, 22.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1021)) + + rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1021).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(4)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{ + {Timestamp: 1000, Value: 1.0}, + {Timestamp: 1010, Value: 11.0}, + {Timestamp: 1020, Value: 11.5}, + {Timestamp: 1021, Value: 22.0}})) + // Test insertion filters with other duplicate policy + opt = &redis.TSOptions{IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0} + result, err = client.TSCreateWithArgs(ctx, "ts-if-2", opt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo("OK")) + resultAdd1, err := client.TSAdd(ctx, "ts-if-1", 1000, 1.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd1).To(BeEquivalentTo(1000)) + resultAdd1, err = client.TSAdd(ctx, "ts-if-1", 1010, 11.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd1).To(BeEquivalentTo(1010)) + resultAdd1, err = client.TSAdd(ctx, "ts-if-1", 1013, 10.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd1).To(BeEquivalentTo(1013)) + + rangePoints, err = client.TSRange(ctx, "ts-if-1", 1000, 1013).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(3)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{ + {Timestamp: 1000, Value: 1.0}, + {Timestamp: 1010, Value: 11.0}, + {Timestamp: 1013, Value: 10.0}})) }) - It("should TSAdd and TSAddWithArgs", Label("timeseries", "tsadd", "tsaddWithArgs"), func() { + It("should TSAdd and TSAddWithArgs", 
Label("timeseries", "tsadd", "tsaddWithArgs", "NonRedisEnterprise"), func() { result, err := client.TSAdd(ctx, "1", 1, 1).Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo(1)) @@ -138,9 +188,23 @@ var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { resultGet, err = client.TSGet(ctx, "tsami-1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultGet.Value).To(BeEquivalentTo(5)) + // Insertion filters + opt = &redis.TSOptions{IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} + result, err = client.TSAddWithArgs(ctx, "ts-if-1", 1000, 1.0, opt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo(1000)) + + result, err = client.TSAddWithArgs(ctx, "ts-if-1", 1004, 3.0, opt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo(1000)) + + rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1004).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(1)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: 1.0}})) }) - It("should TSAlter", Label("timeseries", "tsalter"), func() { + It("should TSAlter", Label("timeseries", "tsalter", "NonRedisEnterprise"), func() { result, err := client.TSCreate(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(result).To(BeEquivalentTo("OK")) @@ -179,6 +243,33 @@ var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { resultInfo, err = client.TSInfo(ctx, "1").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["duplicatePolicy"]).To(BeEquivalentTo("min")) + // Test insertion filters + resultAdd, err := client.TSAdd(ctx, "ts-if-1", 1000, 1.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1000)) + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1010, 11.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1010)) + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1013, 10.0).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1013)) + + alterOpt := &redis.TSAlterOptions{IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} + resultAlter, err = client.TSAlter(ctx, "ts-if-1", alterOpt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAlter).To(BeEquivalentTo("OK")) + + resultAdd, err = client.TSAdd(ctx, "ts-if-1", 1015, 11.5).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(resultAdd).To(BeEquivalentTo(1013)) + + rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1013).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(3)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{ + {Timestamp: 1000, Value: 1.0}, + {Timestamp: 1010, Value: 11.0}, + {Timestamp: 1013, Value: 10.0}})) }) It("should TSCreateRule and TSDeleteRule", Label("timeseries", "tscreaterule", "tsdeleterule"), func() { @@ -216,7 +307,7 @@ var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { Expect(resultInfo["rules"]).To(BeEquivalentTo(map[interface{}]interface{}{})) }) - It("should TSIncrBy, TSIncrByWithArgs, TSDecrBy and TSDecrByWithArgs", Label("timeseries", "tsincrby", "tsdecrby", "tsincrbyWithArgs", "tsdecrbyWithArgs"), func() { + It("should TSIncrBy, TSIncrByWithArgs, TSDecrBy and TSDecrByWithArgs", Label("timeseries", "tsincrby", "tsdecrby", "tsincrbyWithArgs", "tsdecrbyWithArgs", "NonRedisEnterprise"), func() { for i := 0; i < 100; i++ { _, err := 
client.TSIncrBy(ctx, "1", 1).Result() Expect(err).NotTo(HaveOccurred()) @@ -277,6 +368,54 @@ var _ = Describe("RedisTimeseries commands", Label("timeseries"), func() { resultInfo, err = client.TSInfo(ctx, "4").Result() Expect(err).NotTo(HaveOccurred()) Expect(resultInfo["chunkSize"]).To(BeEquivalentTo(128)) + + // Test insertion filters INCRBY + opt = &redis.TSIncrDecrOptions{Timestamp: 1000, IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} + res, err := client.TSIncrByWithArgs(ctx, "ts-if-1", 1.0, opt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(1000)) + + res, err = client.TSIncrByWithArgs(ctx, "ts-if-1", 3.0, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(1000)) + + rangePoints, err := client.TSRange(ctx, "ts-if-1", 1000, 1004).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(1)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: 1.0}})) + + res, err = client.TSIncrByWithArgs(ctx, "ts-if-1", 10.1, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(1000)) + + rangePoints, err = client.TSRange(ctx, "ts-if-1", 1000, 1004).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(1)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: 11.1}})) + + // Test insertion filters DECRBY + opt = &redis.TSIncrDecrOptions{Timestamp: 1000, IgnoreMaxTimeDiff: 5, IgnoreMaxValDiff: 10.0, DuplicatePolicy: "LAST"} + res, err = client.TSDecrByWithArgs(ctx, "ts-if-2", 1.0, opt).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(1000)) + + res, err = client.TSDecrByWithArgs(ctx, "ts-if-2", 3.0, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(1000)) + + rangePoints, err = client.TSRange(ctx, "ts-if-2", 1000, 1004).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(1)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: -1.0}})) + + res, err = client.TSDecrByWithArgs(ctx, "ts-if-2", 10.1, &redis.TSIncrDecrOptions{Timestamp: 1000}).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeEquivalentTo(1000)) + + rangePoints, err = client.TSRange(ctx, "ts-if-2", 1000, 1004).Result() + Expect(err).NotTo(HaveOccurred()) + Expect(len(rangePoints)).To(BeEquivalentTo(1)) + Expect(rangePoints).To(BeEquivalentTo([]redis.TSTimestampValue{{Timestamp: 1000, Value: -11.1}})) }) It("should TSGet", Label("timeseries", "tsget"), func() {