package collection

import (
	"fmt"
	"math/rand"
	"reflect"
	"strconv"
	"testing"
	"time"

	"github.com/tidwall/geojson"
	"github.com/tidwall/geojson/geometry"
	"github.com/tidwall/gjson"
)
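// PO constructs a *geojson.Point from x (longitude) and y (latitude)
// values, keeping the test bodies below compact.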
func PO(x, y float64) *geojson.Point {
	return geojson.NewPoint(geometry.Point{X: x, Y: y})
}
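// init seeds math/rand with the current time and prints the seed, so a
// failing randomized run can be reproduced.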
func init() {
	seed := time.Now().UnixNano()
	println(seed)
	rand.Seed(seed)
}
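// expect fails the current test immediately if the condition is false.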
func expect(t testing.TB, expect bool) {
	t.Helper()
	if !expect {
		t.Fatal("not what you expected")
	}
}
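// bounds returns the collection's bounding box as a geometry.Rect so it can
// be compared directly in assertions.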
func bounds(c *Collection) geometry.Rect {
	minX, minY, maxX, maxY := c.Bounds()
	return geometry.Rect{
		Min: geometry.Point{X: minX, Y: minY},
		Max: geometry.Point{X: maxX, Y: maxY},
	}
}
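// TestCollectionNewCollection inserts 10,000 random points and verifies that
// a whole-world geoSearch, Count, and the stored objects all match what was
// inserted.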
func TestCollectionNewCollection(t *testing.T) {
	const numItems = 10000
	objs := make(map[string]geojson.Object)
	c := New()
	for i := 0; i < numItems; i++ {
		id := strconv.FormatInt(int64(i), 10)
		obj := PO(rand.Float64()*360-180, rand.Float64()*180-90)
		objs[id] = obj
		c.Set(id, obj, nil, nil, 0)
	}
	count := 0
	bbox := geometry.Rect{
		Min: geometry.Point{X: -180, Y: -90},
		Max: geometry.Point{X: 180, Y: 90},
	}
	c.geoSearch(bbox, func(id string, obj geojson.Object, field []float64) bool {
		count++
		return true
	})
	if count != len(objs) {
		t.Fatalf("count = %d, expect %d", count, len(objs))
	}
	count = c.Count()
	if count != len(objs) {
		t.Fatalf("c.Count() = %d, expect %d", count, len(objs))
	}
	testCollectionVerifyContents(t, c, objs)
}
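// TestCollectionSet covers Set, SetField, SetFields, and Delete across
// subtests for string objects, point objects, and field handling.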
func TestCollectionSet(t *testing.T) {
	t.Run("AddString", func(t *testing.T) {
		c := New()
		str1 := String("hello")
		oldObject, oldFields, newFields := c.Set("str", str1, nil, nil, 0)
		expect(t, oldObject == nil)
		expect(t, len(oldFields) == 0)
		expect(t, len(newFields) == 0)
	})
t.Run("UpdateString", func(t *testing.T) {
|
|
|
|
c := New()
|
|
|
|
str1 := String("hello")
|
|
|
|
str2 := String("world")
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObject, oldFields, newFields := c.Set("str", str1, nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObject == nil)
|
|
|
|
expect(t, len(oldFields) == 0)
|
|
|
|
expect(t, len(newFields) == 0)
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObject, oldFields, newFields = c.Set("str", str2, nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObject == str1)
|
|
|
|
expect(t, len(oldFields) == 0)
|
|
|
|
expect(t, len(newFields) == 0)
|
|
|
|
})
|
|
|
|
t.Run("AddPoint", func(t *testing.T) {
|
|
|
|
c := New()
|
|
|
|
point1 := PO(-112.1, 33.1)
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObject, oldFields, newFields := c.Set("point", point1, nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObject == nil)
|
|
|
|
expect(t, len(oldFields) == 0)
|
|
|
|
expect(t, len(newFields) == 0)
|
|
|
|
})
|
|
|
|
t.Run("UpdatePoint", func(t *testing.T) {
|
|
|
|
c := New()
|
|
|
|
point1 := PO(-112.1, 33.1)
|
|
|
|
point2 := PO(-112.2, 33.2)
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObject, oldFields, newFields := c.Set("point", point1, nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObject == nil)
|
|
|
|
expect(t, len(oldFields) == 0)
|
|
|
|
expect(t, len(newFields) == 0)
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObject, oldFields, newFields = c.Set("point", point2, nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObject == point1)
|
|
|
|
expect(t, len(oldFields) == 0)
|
|
|
|
expect(t, len(newFields) == 0)
|
|
|
|
})
|
|
|
|
t.Run("Fields", func(t *testing.T) {
|
|
|
|
c := New()
|
|
|
|
str1 := String("hello")
|
|
|
|
fNames := []string{"a", "b", "c"}
|
|
|
|
fValues := []float64{1, 2, 3}
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObj, oldFlds, newFlds := c.Set("str", str1, fNames, fValues, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObj == nil)
|
|
|
|
expect(t, len(oldFlds) == 0)
|
|
|
|
expect(t, reflect.DeepEqual(newFlds, fValues))
|
|
|
|
str2 := String("hello")
|
|
|
|
fNames = []string{"d", "e", "f"}
|
|
|
|
fValues = []float64{4, 5, 6}
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObj, oldFlds, newFlds = c.Set("str", str2, fNames, fValues, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObj == str1)
|
|
|
|
expect(t, reflect.DeepEqual(oldFlds, []float64{1, 2, 3}))
|
|
|
|
expect(t, reflect.DeepEqual(newFlds, []float64{1, 2, 3, 4, 5, 6}))
|
|
|
|
fValues = []float64{7, 8, 9, 10, 11, 12}
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
oldObj, oldFlds, newFlds = c.Set("str", str1, nil, fValues, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, oldObj == str2)
|
|
|
|
expect(t, reflect.DeepEqual(oldFlds, []float64{1, 2, 3, 4, 5, 6}))
|
|
|
|
expect(t, reflect.DeepEqual(newFlds, []float64{7, 8, 9, 10, 11, 12}))
|
|
|
|
})
|
|
|
|
t.Run("Delete", func(t *testing.T) {
|
|
|
|
c := New()
|
|
|
|
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
c.Set("1", String("1"), nil, nil, 0)
|
|
|
|
c.Set("2", String("2"), nil, nil, 0)
|
|
|
|
c.Set("3", PO(1, 2), nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
|
|
|
|
expect(t, c.Count() == 3)
|
|
|
|
expect(t, c.StringCount() == 2)
|
|
|
|
expect(t, c.PointCount() == 1)
|
|
|
|
expect(t, bounds(c) == geometry.Rect{
|
|
|
|
Min: geometry.Point{X: 1, Y: 2},
|
|
|
|
Max: geometry.Point{X: 1, Y: 2}})
|
|
|
|
var v geojson.Object
|
|
|
|
var ok bool
|
|
|
|
var flds []float64
|
|
|
|
var updated bool
|
|
|
|
var updateCount int
|
|
|
|
|
|
|
|
v, _, ok = c.Delete("2")
|
|
|
|
expect(t, v.String() == "2")
|
|
|
|
expect(t, ok)
|
|
|
|
expect(t, c.Count() == 2)
|
|
|
|
expect(t, c.StringCount() == 1)
|
|
|
|
expect(t, c.PointCount() == 1)
|
|
|
|
|
|
|
|
v, _, ok = c.Delete("1")
|
|
|
|
expect(t, v.String() == "1")
|
|
|
|
expect(t, ok)
|
|
|
|
expect(t, c.Count() == 1)
|
|
|
|
expect(t, c.StringCount() == 0)
|
|
|
|
expect(t, c.PointCount() == 1)
|
|
|
|
|
|
|
|
expect(t, len(c.FieldMap()) == 0)
|
|
|
|
|
2021-03-31 18:13:44 +03:00
|
|
|
_, flds, updated, ok = c.SetField("3", "hello", 123)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, ok)
|
|
|
|
expect(t, reflect.DeepEqual(flds, []float64{123}))
|
|
|
|
expect(t, updated)
|
|
|
|
expect(t, c.FieldMap()["hello"] == 0)
|
|
|
|
|
2021-03-31 18:13:44 +03:00
|
|
|
_, flds, updated, ok = c.SetField("3", "hello", 1234)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, ok)
|
|
|
|
expect(t, reflect.DeepEqual(flds, []float64{1234}))
|
|
|
|
expect(t, updated)
|
|
|
|
|
2021-03-31 18:13:44 +03:00
|
|
|
_, flds, updated, ok = c.SetField("3", "hello", 1234)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, ok)
|
|
|
|
expect(t, reflect.DeepEqual(flds, []float64{1234}))
|
|
|
|
expect(t, !updated)
|
|
|
|
|
2021-03-31 18:13:44 +03:00
|
|
|
_, flds, updateCount, ok = c.SetFields("3",
|
2018-10-11 00:25:40 +03:00
|
|
|
[]string{"planet", "world"}, []float64{55, 66})
|
|
|
|
expect(t, ok)
|
|
|
|
expect(t, reflect.DeepEqual(flds, []float64{1234, 55, 66}))
|
|
|
|
expect(t, updateCount == 2)
|
|
|
|
expect(t, c.FieldMap()["hello"] == 0)
|
|
|
|
expect(t, c.FieldMap()["planet"] == 1)
|
|
|
|
expect(t, c.FieldMap()["world"] == 2)
|
|
|
|
|
|
|
|
v, _, ok = c.Delete("3")
|
|
|
|
expect(t, v.String() == `{"type":"Point","coordinates":[1,2]}`)
|
|
|
|
expect(t, ok)
|
|
|
|
expect(t, c.Count() == 0)
|
|
|
|
expect(t, c.StringCount() == 0)
|
|
|
|
expect(t, c.PointCount() == 0)
|
|
|
|
v, _, ok = c.Delete("3")
|
|
|
|
expect(t, v == nil)
|
|
|
|
expect(t, !ok)
|
|
|
|
expect(t, c.Count() == 0)
|
|
|
|
expect(t, bounds(c) == geometry.Rect{})
|
Update expiration logic
This commit changes the logic for managing the expiration of
objects in the database.
Before: There was a server-wide hashmap that stored the
collection key, id, and expiration timestamp for all objects
that had a TTL. The hashmap was occasionally probed at 20
random positions, looking for objects that have expired. Those
expired objects were immediately deleted, and if there was 5
or more objects deleted, then the probe happened again, with
no delay. If the number of objects was less than 5 then the
there was a 1/10th of a second delay before the next probe.
Now: Rather than a server-wide hashmap, each collection has
its own ordered priority queue that stores objects with TTLs.
Rather than probing, there is a background routine that
executes every 1/10th of a second, which pops the expired
objects from the collection queues, and deletes them.
The collection/queue method is a more stable approach than
the hashmap/probing method. With probing, we can run into
major cache misses for some cases where there is wide
TTL duration, such as in the hours or days. This may cause
the system to occasionally fall behind, leaving should-be
expired objects in memory. Using a queue, there is no
cache misses, all objects that should be expired will be
right away, regardless of the TTL durations.
Fixes #616
2021-07-12 23:37:50 +03:00
|
|
|
v, _, _, ok = c.Get("3")
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, v == nil)
|
|
|
|
expect(t, !ok)
|
|
|
|
_, _, _, ok = c.SetField("3", "hello", 123)
|
|
|
|
expect(t, !ok)
|
|
|
|
_, _, _, ok = c.SetFields("3", []string{"hello"}, []float64{123})
|
|
|
|
expect(t, !ok)
|
|
|
|
expect(t, c.TotalWeight() == 0)
|
|
|
|
expect(t, c.FieldMap()["hello"] == 0)
|
|
|
|
expect(t, c.FieldMap()["planet"] == 1)
|
|
|
|
expect(t, c.FieldMap()["world"] == 2)
|
|
|
|
expect(t, reflect.DeepEqual(
|
|
|
|
c.FieldArr(), []string{"hello", "planet", "world"}),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
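// TestCollectionScan inserts 256 objects in random order and verifies that
// Scan, ScanRange, and ScanGreaterOrEqual return ids in the expected
// ascending or descending order along with their field values.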
func TestCollectionScan(t *testing.T) {
	N := 256
	c := New()
	for _, i := range rand.Perm(N) {
		id := fmt.Sprintf("%04d", i)
		c.Set(id, String(id), []string{"ex"}, []float64{float64(i)}, 0)
	}
	var n int
	var prevID string
	c.Scan(false, nil, nil, func(id string, obj geojson.Object, fields []float64) bool {
		if n > 0 {
			expect(t, id > prevID)
		}
		expect(t, id == fmt.Sprintf("%04d", int(fields[0])))
		n++
		prevID = id
		return true
	})
	expect(t, n == c.Count())
	n = 0
	c.Scan(true, nil, nil, func(id string, obj geojson.Object, fields []float64) bool {
		if n > 0 {
			expect(t, id < prevID)
		}
		expect(t, id == fmt.Sprintf("%04d", int(fields[0])))
		n++
		prevID = id
		return true
	})
	expect(t, n == c.Count())

	n = 0
	c.ScanRange("0060", "0070", false, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			if n > 0 {
				expect(t, id > prevID)
			}
			expect(t, id == fmt.Sprintf("%04d", int(fields[0])))
			n++
			prevID = id
			return true
		})
	expect(t, n == 10)

	n = 0
	c.ScanRange("0070", "0060", true, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			if n > 0 {
				expect(t, id < prevID)
			}
			expect(t, id == fmt.Sprintf("%04d", int(fields[0])))
			n++
			prevID = id
			return true
		})
	expect(t, n == 10)

	n = 0
	c.ScanGreaterOrEqual("0070", true, nil, nil,
		func(id string, obj geojson.Object, fields []float64, ex int64) bool {
			if n > 0 {
				expect(t, id < prevID)
			}
			expect(t, id == fmt.Sprintf("%04d", int(fields[0])))
			n++
			prevID = id
			return true
		})
	expect(t, n == 71)

	n = 0
	c.ScanGreaterOrEqual("0070", false, nil, nil,
		func(id string, obj geojson.Object, fields []float64, ex int64) bool {
			if n > 0 {
				expect(t, id > prevID)
			}
			expect(t, id == fmt.Sprintf("%04d", int(fields[0])))
			n++
			prevID = id
			return true
		})
	expect(t, n == c.Count()-70)
}
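// TestCollectionSearch stores 256 string values under shuffled ids and
// verifies that SearchValues and SearchValuesRange iterate the values in
// ascending or descending order.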
func TestCollectionSearch(t *testing.T) {
	N := 256
	c := New()
	for i, j := range rand.Perm(N) {
		id := fmt.Sprintf("%04d", j)
		ex := fmt.Sprintf("%04d", i)
		c.Set(id, String(ex), []string{"i", "j"},
			[]float64{float64(i), float64(j)}, 0)
	}
	var n int
	var prevValue string
	c.SearchValues(false, nil, nil, func(id string, obj geojson.Object, fields []float64) bool {
		if n > 0 {
			expect(t, obj.String() > prevValue)
		}
		expect(t, id == fmt.Sprintf("%04d", int(fields[1])))
		n++
		prevValue = obj.String()
		return true
	})
	expect(t, n == c.Count())
	n = 0
	c.SearchValues(true, nil, nil, func(id string, obj geojson.Object, fields []float64) bool {
		if n > 0 {
			expect(t, obj.String() < prevValue)
		}
		expect(t, id == fmt.Sprintf("%04d", int(fields[1])))
		n++
		prevValue = obj.String()
		return true
	})
	expect(t, n == c.Count())

	n = 0
	c.SearchValuesRange("0060", "0070", false, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			if n > 0 {
				expect(t, obj.String() > prevValue)
			}
			expect(t, id == fmt.Sprintf("%04d", int(fields[1])))
			n++
			prevValue = obj.String()
			return true
		})
	expect(t, n == 10)

	n = 0
	c.SearchValuesRange("0070", "0060", true, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			if n > 0 {
				expect(t, obj.String() < prevValue)
			}
			expect(t, id == fmt.Sprintf("%04d", int(fields[1])))
			n++
			prevValue = obj.String()
			return true
		})
	expect(t, n == 10)
}
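// TestCollectionWeight verifies that TotalWeight grows when objects and
// fields are added and returns to zero once everything is deleted.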
func TestCollectionWeight(t *testing.T) {
	c := New()
	c.Set("1", String("1"), nil, nil, 0)
	expect(t, c.TotalWeight() > 0)
	c.Delete("1")
	expect(t, c.TotalWeight() == 0)
	c.Set("1", String("1"),
		[]string{"a", "b", "c"},
		[]float64{1, 2, 3},
		0,
	)
	expect(t, c.TotalWeight() > 0)
	c.Delete("1")
	expect(t, c.TotalWeight() == 0)
	c.Set("1", String("1"),
		[]string{"a", "b", "c"},
		[]float64{1, 2, 3},
		0,
	)
	c.Set("2", String("2"),
		[]string{"d", "e", "f"},
		[]float64{4, 5, 6},
		0,
	)
	c.Set("1", String("1"),
		[]string{"d", "e", "f"},
		[]float64{4, 5, 6},
		0,
	)
	c.Delete("1")
	c.Delete("2")
	expect(t, c.TotalWeight() == 0)
}
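// TestSpatialSearch loads a small FeatureCollection of points and polygons
// and checks Within, Intersects, and Nearby results, including early
// termination and nearest-first ordering.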
func TestSpatialSearch(t *testing.T) {
	json := `
	{"type":"FeatureCollection","features":[
{"type":"Feature","id":"p1","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Point","coordinates":[-71.4743041992187,42.51867517417283]}},
{"type":"Feature","id":"p2","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Point","coordinates":[-71.4056396484375,42.50197174319114]}},
{"type":"Feature","id":"p3","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Point","coordinates":[-71.4619445800781,42.49437779897246]}},
{"type":"Feature","id":"p4","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Point","coordinates":[-71.4337921142578,42.53891577257117]}},
{"type":"Feature","id":"r1","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Polygon","coordinates":[[[-71.4279556274414,42.48804880765346],[-71.37439727783203,42.48804880765346],[-71.37439727783203,42.52322988064187],[-71.4279556274414,42.52322988064187],[-71.4279556274414,42.48804880765346]]]}},
{"type":"Feature","id":"r2","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Polygon","coordinates":[[[-71.4825439453125,42.53588010092859],[-71.45027160644531,42.53588010092859],[-71.45027160644531,42.55839115400447],[-71.4825439453125,42.55839115400447],[-71.4825439453125,42.53588010092859]]]}},
{"type":"Feature","id":"r3","properties":{"marker-color":"#962d28","stroke":"#962d28","fill":"#962d28"},"geometry":{"type":"Polygon","coordinates": [[[-71.4111328125,42.53512115995963],[-71.3833236694336,42.53512115995963],[-71.3833236694336,42.54953946116446],[-71.4111328125,42.54953946116446],[-71.4111328125,42.53512115995963]]]}},
{"type":"Feature","id":"q1","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-71.55258178710938,42.51361399979923],[-71.42074584960938,42.51361399979923],[-71.42074584960938,42.59100512331456],[-71.55258178710938,42.59100512331456],[-71.55258178710938,42.51361399979923]]]}},
{"type":"Feature","id":"q2","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-71.52992248535156,42.48121277771616],[-71.36375427246092,42.48121277771616],[-71.36375427246092,42.57786045892046],[-71.52992248535156,42.57786045892046],[-71.52992248535156,42.48121277771616]]]}},
{"type":"Feature","id":"q3","properties":{},"geometry":{"type":"Polygon","coordinates":[[[-71.49490356445312,42.56673588590953],[-71.52236938476562,42.47462922809497],[-71.42898559570312,42.464499337722344],[-71.43241882324219,42.522217752342236],[-71.37954711914061,42.56420729713456],[-71.49490356445312,42.56673588590953]]]}},
{"type":"Feature","id":"q4","properties":{},"geometry":{"type":"Point","coordinates": [-71.46366119384766,42.54043355305221]}}
	]}
	`
	p1, _ := geojson.Parse(gjson.Get(json, `features.#[id=="p1"]`).Raw, nil)
	p2, _ := geojson.Parse(gjson.Get(json, `features.#[id=="p2"]`).Raw, nil)
	p3, _ := geojson.Parse(gjson.Get(json, `features.#[id=="p3"]`).Raw, nil)
	p4, _ := geojson.Parse(gjson.Get(json, `features.#[id=="p4"]`).Raw, nil)
	r1, _ := geojson.Parse(gjson.Get(json, `features.#[id=="r1"]`).Raw, nil)
	r2, _ := geojson.Parse(gjson.Get(json, `features.#[id=="r2"]`).Raw, nil)
	r3, _ := geojson.Parse(gjson.Get(json, `features.#[id=="r3"]`).Raw, nil)
	q1, _ := geojson.Parse(gjson.Get(json, `features.#[id=="q1"]`).Raw, nil)
	q2, _ := geojson.Parse(gjson.Get(json, `features.#[id=="q2"]`).Raw, nil)
	q3, _ := geojson.Parse(gjson.Get(json, `features.#[id=="q3"]`).Raw, nil)
	q4, _ := geojson.Parse(gjson.Get(json, `features.#[id=="q4"]`).Raw, nil)

	c := New()
c.Set("p1", p1, nil, nil, 0)
|
|
|
|
c.Set("p2", p2, nil, nil, 0)
|
|
|
|
c.Set("p3", p3, nil, nil, 0)
|
|
|
|
c.Set("p4", p4, nil, nil, 0)
|
|
|
|
c.Set("r1", r1, nil, nil, 0)
|
|
|
|
c.Set("r2", r2, nil, nil, 0)
|
|
|
|
c.Set("r3", r3, nil, nil, 0)
|
2018-10-11 00:25:40 +03:00
|
|
|
|
|
|
|
var n int
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Within(q1, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(id string, obj geojson.Object, fields []float64) bool {
|
|
|
|
n++
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 3)
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Within(q2, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(id string, obj geojson.Object, fields []float64) bool {
|
|
|
|
n++
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 7)
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Within(q3, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(id string, obj geojson.Object, fields []float64) bool {
|
|
|
|
n++
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 4)
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Intersects(q1, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(_ string, _ geojson.Object, _ []float64) bool {
|
|
|
|
n++
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 4)
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Intersects(q2, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(_ string, _ geojson.Object, _ []float64) bool {
|
|
|
|
n++
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 7)
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Intersects(q3, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(_ string, _ geojson.Object, _ []float64) bool {
|
|
|
|
n++
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 5)
|
|
|
|
|
|
|
|
n = 0
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Intersects(q3, 0, nil, nil,
|
2018-10-11 00:25:40 +03:00
|
|
|
func(_ string, _ geojson.Object, _ []float64) bool {
|
|
|
|
n++
|
|
|
|
return n <= 1
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, n == 2)
|
|
|
|
|
|
|
|
var items []geojson.Object
|
|
|
|
exitems := []geojson.Object{
|
2020-04-08 06:10:58 +03:00
|
|
|
r2, p4, p1, r1, r3, p3, p2,
|
2018-10-11 00:25:40 +03:00
|
|
|
}
|
2020-04-08 06:10:58 +03:00
|
|
|
|
|
|
|
lastDist := float64(-1)
|
|
|
|
distsMonotonic := true
|
2019-04-24 15:09:41 +03:00
|
|
|
c.Nearby(q4, nil, nil,
|
2020-04-08 06:10:58 +03:00
|
|
|
func(id string, obj geojson.Object, fields []float64, dist float64) bool {
|
|
|
|
if dist < lastDist {
|
|
|
|
distsMonotonic = false
|
|
|
|
}
|
2018-10-11 00:25:40 +03:00
|
|
|
items = append(items, obj)
|
|
|
|
return true
|
|
|
|
},
|
|
|
|
)
|
|
|
|
expect(t, len(items) == 7)
|
2020-04-08 06:10:58 +03:00
|
|
|
expect(t, distsMonotonic)
|
2018-10-11 00:25:40 +03:00
|
|
|
expect(t, reflect.DeepEqual(items, exitems))
|
|
|
|
}
|
|
|
|
|
|
|
|
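// TestCollectionSparse fills a rectangle with 10,000 random points and
// verifies that Within and Intersects at sparse levels 1, 2, and 3 return
// 4, 16, and 64 representative items, and that iteration stops early when
// the callback returns false.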
func TestCollectionSparse(t *testing.T) {
	rect := geojson.NewRect(geometry.Rect{
		Min: geometry.Point{X: -71.598930, Y: 42.4586739},
		Max: geometry.Point{X: -71.37302, Y: 42.607937},
	})
	N := 10000
	c := New()
	r := rect.Rect()
	for i := 0; i < N; i++ {
		x := (r.Max.X-r.Min.X)*rand.Float64() + r.Min.X
		y := (r.Max.Y-r.Min.Y)*rand.Float64() + r.Min.Y
		point := PO(x, y)
		c.Set(fmt.Sprintf("%d", i), point, nil, nil, 0)
	}
	var n int

	n = 0
	c.Within(rect, 1, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			n++
			return true
		},
	)
	expect(t, n == 4)

	n = 0
	c.Within(rect, 2, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			n++
			return true
		},
	)
	expect(t, n == 16)

	n = 0
	c.Within(rect, 3, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			n++
			return true
		},
	)
	expect(t, n == 64)

	n = 0
	c.Within(rect, 3, nil, nil,
		func(id string, obj geojson.Object, fields []float64) bool {
			n++
			return n <= 30
		},
	)
	expect(t, n == 31)

	n = 0
	c.Intersects(rect, 3, nil, nil,
		func(id string, _ geojson.Object, _ []float64) bool {
			n++
			return true
		},
	)
	expect(t, n == 64)

	n = 0
	c.Intersects(rect, 3, nil, nil,
		func(id string, _ geojson.Object, _ []float64) bool {
			n++
			return n <= 30
		},
	)
	expect(t, n == 31)
}
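// testCollectionVerifyContents asserts that every object in objs can be read
// back from the collection and serializes to identical JSON.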
func testCollectionVerifyContents(t *testing.T, c *Collection, objs map[string]geojson.Object) {
	for id, o2 := range objs {
		o1, _, _, ok := c.Get(id)
		if !ok {
			t.Fatalf("ok[%s] = false, expect true", id)
		}
		j1 := string(o1.AppendJSON(nil))
		j2 := string(o2.AppendJSON(nil))
		if j1 != j2 {
			t.Fatalf("j1 == %s, expect %s", j1, j2)
		}
	}
}
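// TestManyCollections spreads 100,000 random points across 100 collections
// (1,000 points each) and runs a geoSearch over one of them as a smoke test.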
func TestManyCollections(t *testing.T) {
	colsM := make(map[string]*Collection)
	cols := 100
	objs := 1000
	k := 0
	for i := 0; i < cols; i++ {
		key := strconv.FormatInt(int64(i), 10)
		for j := 0; j < objs; j++ {
			id := strconv.FormatInt(int64(j), 10)
			p := geometry.Point{
				X: rand.Float64()*360 - 180,
				Y: rand.Float64()*180 - 90,
			}
			obj := geojson.Object(PO(p.X, p.Y))
			col, ok := colsM[key]
			if !ok {
				col = New()
				colsM[key] = col
			}
			col.Set(id, obj, nil, nil, 0)
			k++
		}
	}
	col := colsM["13"]
	//println(col.Count())
	bbox := geometry.Rect{
		Min: geometry.Point{X: -180, Y: 30},
		Max: geometry.Point{X: 34, Y: 100},
	}
	col.geoSearch(bbox, func(id string, obj geojson.Object, fields []float64) bool {
		//println(id)
		return true
	})
}

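// testPointItem is a pre-generated (id, object, fields) tuple used by the
// benchmarks below, so that input generation stays out of the timed loops.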
type testPointItem struct {
	id     string
	object geojson.Object
	fields []float64
}

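// makeBenchFields returns a zeroed field slice of the requested length, or
// nil when nFields is 0, matching the _NoFields benchmark variants.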
func makeBenchFields(nFields int) []float64 {
	if nFields == 0 {
		return nil
	}

	return make([]float64, nFields)
}

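// Each benchmark below comes in a _Fields and a _NoFields variant so the cost
// of carrying per-object field values can be compared. They can be run with,
// for example:
//
//	go test -run '^$' -bench . -benchmem
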
func BenchmarkInsert_Fields(t *testing.B) {
	benchmarkInsert(t, 1)
}

func BenchmarkInsert_NoFields(t *testing.B) {
	benchmarkInsert(t, 0)
}

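// benchmarkInsert times Set calls on an initially empty collection; the timer
// is reset after the inputs are generated so only insertion is measured.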
func benchmarkInsert(t *testing.B, nFields int) {
	rand.Seed(time.Now().UnixNano())
	items := make([]testPointItem, t.N)
	for i := 0; i < t.N; i++ {
		items[i] = testPointItem{
			fmt.Sprintf("%d", i),
			PO(rand.Float64()*360-180, rand.Float64()*180-90),
			makeBenchFields(nFields),
		}
	}
	col := New()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		col.Set(items[i].id, items[i].object, nil, items[i].fields, 0)
	}
}

func BenchmarkReplace_Fields(t *testing.B) {
	benchmarkReplace(t, 1)
}

func BenchmarkReplace_NoFields(t *testing.B) {
	benchmarkReplace(t, 0)
}

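// benchmarkReplace pre-populates the collection, then times Set calls that
// overwrite existing ids in random order and verifies that the previous
// object is returned.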
func benchmarkReplace(t *testing.B, nFields int) {
	rand.Seed(time.Now().UnixNano())
	items := make([]testPointItem, t.N)
	for i := 0; i < t.N; i++ {
		items[i] = testPointItem{
			fmt.Sprintf("%d", i),
			PO(rand.Float64()*360-180, rand.Float64()*180-90),
			makeBenchFields(nFields),
		}
	}
	col := New()
	for i := 0; i < t.N; i++ {
		col.Set(items[i].id, items[i].object, nil, items[i].fields, 0)
	}
	t.ResetTimer()
	for _, i := range rand.Perm(t.N) {
		o, _, _ := col.Set(items[i].id, items[i].object, nil, nil, 0)
		if o != items[i].object {
			t.Fatal("shoot!")
		}
	}
}

func BenchmarkGet_Fields(t *testing.B) {
	benchmarkGet(t, 1)
}

func BenchmarkGet_NoFields(t *testing.B) {
	benchmarkGet(t, 0)
}

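// benchmarkGet pre-populates the collection, then times Get lookups of every
// id in random order.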
func benchmarkGet(t *testing.B, nFields int) {
	rand.Seed(time.Now().UnixNano())
	items := make([]testPointItem, t.N)
	for i := 0; i < t.N; i++ {
		items[i] = testPointItem{
			fmt.Sprintf("%d", i),
			PO(rand.Float64()*360-180, rand.Float64()*180-90),
			makeBenchFields(nFields),
		}
	}
	col := New()
	for i := 0; i < t.N; i++ {
		col.Set(items[i].id, items[i].object, nil, items[i].fields, 0)
	}
	t.ResetTimer()
	for _, i := range rand.Perm(t.N) {
		o, _, _, _ := col.Get(items[i].id)
		if o != items[i].object {
			t.Fatal("shoot!")
		}
	}
}

func BenchmarkRemove_Fields(t *testing.B) {
	benchmarkRemove(t, 1)
}

func BenchmarkRemove_NoFields(t *testing.B) {
	benchmarkRemove(t, 0)
}

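// benchmarkRemove pre-populates the collection, then times Delete calls for
// every id in random order and verifies that the removed object is returned.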
func benchmarkRemove(t *testing.B, nFields int) {
	rand.Seed(time.Now().UnixNano())
	items := make([]testPointItem, t.N)
	for i := 0; i < t.N; i++ {
		items[i] = testPointItem{
			fmt.Sprintf("%d", i),
			PO(rand.Float64()*360-180, rand.Float64()*180-90),
			makeBenchFields(nFields),
		}
	}
	col := New()
	for i := 0; i < t.N; i++ {
		col.Set(items[i].id, items[i].object, nil, items[i].fields, 0)
	}
	t.ResetTimer()
	for _, i := range rand.Perm(t.N) {
		o, _, _ := col.Delete(items[i].id)
		if o != items[i].object {
			t.Fatal("shoot!")
		}
	}
}

func BenchmarkScan_Fields(t *testing.B) {
	benchmarkScan(t, 1)
}

func BenchmarkScan_NoFields(t *testing.B) {
	benchmarkScan(t, 0)
}

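// benchmarkScan pre-populates the collection, then times Scan passes that
// bail out after about 500 items per pass.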
func benchmarkScan(t *testing.B, nFields int) {
	rand.Seed(time.Now().UnixNano())
	items := make([]testPointItem, t.N)
	for i := 0; i < t.N; i++ {
		items[i] = testPointItem{
			fmt.Sprintf("%d", i),
			PO(rand.Float64()*360-180, rand.Float64()*180-90),
			makeBenchFields(nFields),
		}
	}
	col := New()
	for i := 0; i < t.N; i++ {
		col.Set(items[i].id, items[i].object, nil, items[i].fields, 0)
	}
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		var scanIteration int
		col.Scan(true, nil, nil, func(id string, obj geojson.Object, fields []float64) bool {
			scanIteration++
			return scanIteration <= 500
		})
	}
}