mirror of https://github.com/siddontang/go.git
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package cache implements an LRU cache. The implementation borrows
// heavily from SmallLRUCache (originally by Nathan Schrenk). The object
// maintains a doubly-linked list of elements. When an element is
// accessed it is promoted to the head of the list, and when space is
// needed the element at the tail of the list (the least recently used
// element) is evicted.
package cache

import (
	"container/list"
	"fmt"
	"sync"
	"time"
)
// LRUCache is a typical LRU cache implementation. When the cache
// reaches its capacity, the least recently used item is evicted.
// Note that the capacity is not the number of items, but the total
// sum of the Size() of each item.
type LRUCache struct {
	mu sync.Mutex

	// list & table of *entry objects
	list  *list.List
	table map[string]*list.Element

	// size is the sum of the Size() of all current entries. It is only
	// a rough approximation of the memory actually consumed.
	size int64

	// capacity is the bound that size is kept at or below by evicting
	// old entries.
	capacity int64
}
// Value is the interface that values stored in LRUCache must satisfy.
type Value interface {
	// Size returns how big this value is; the cache capacity is
	// expressed in the same units.
	Size() int
}
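
// bytesValue is an illustrative Value implementation, added here only as a
// documentation sketch (it is not part of the original API): a byte slice
// whose Size is its length, so the cache capacity becomes a byte budget.
type bytesValue []byte

// Size reports the number of bytes held.
func (v bytesValue) Size() int { return len(v) }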

// Item is what is stored in the cache.
type Item struct {
	Key   string
	Value Value
}

type entry struct {
	key           string
	value         Value
	size          int64
	time_accessed time.Time
}

// NewLRUCache creates a new empty cache with the given capacity.
func NewLRUCache(capacity int64) *LRUCache {
	return &LRUCache{
		list:     list.New(),
		table:    make(map[string]*list.Element),
		capacity: capacity,
	}
}

// Get returns a value from the cache, and marks the entry as most
// recently used.
func (lru *LRUCache) Get(key string) (v Value, ok bool) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	element := lru.table[key]
	if element == nil {
		return nil, false
	}
	lru.moveToFront(element)
	return element.Value.(*entry).value, true
}

// Set sets a value in the cache.
func (lru *LRUCache) Set(key string, value Value) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	if element := lru.table[key]; element != nil {
		lru.updateInplace(element, value)
	} else {
		lru.addNew(key, value)
	}
}
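
// exampleUsage is an illustrative sketch of the basic Set/Get round trip,
// added for documentation only (it is not part of the original API, and it
// relies on the sample bytesValue type defined above).
func exampleUsage() {
	// The constructor takes a size budget, not an item count; with
	// bytesValue that budget is in bytes.
	c := NewLRUCache(64)
	c.Set("greeting", bytesValue("hello"))
	if v, ok := c.Get("greeting"); ok {
		fmt.Println(string(v.(bytesValue))) // "hello"; Get also promotes the entry
	}
}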

// SetIfAbsent sets the value in the cache only if the key is not already
// present. If the key exists, the existing entry is promoted but its value
// is left unchanged.
func (lru *LRUCache) SetIfAbsent(key string, value Value) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	if element := lru.table[key]; element != nil {
		lru.moveToFront(element)
	} else {
		lru.addNew(key, value)
	}
}
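
// exampleSetIfAbsent sketches the "first value wins" behavior of SetIfAbsent
// (illustrative only; not part of the original API).
func exampleSetIfAbsent() {
	c := NewLRUCache(1024)
	c.SetIfAbsent("k", bytesValue("first"))
	c.SetIfAbsent("k", bytesValue("second")) // key exists: promoted, value unchanged
	if v, ok := c.Get("k"); ok {
		fmt.Println(string(v.(bytesValue))) // "first"
	}
}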

// Delete removes an entry from the cache, and reports whether the entry
// existed.
func (lru *LRUCache) Delete(key string) bool {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	element := lru.table[key]
	if element == nil {
		return false
	}

	lru.list.Remove(element)
	delete(lru.table, key)
	lru.size -= element.Value.(*entry).size
	return true
}

// Clear will clear the entire cache.
func (lru *LRUCache) Clear() {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	lru.list.Init()
	lru.table = make(map[string]*list.Element)
	lru.size = 0
}

// SetCapacity sets the capacity of the cache. If the new capacity is smaller
// and the current cache size exceeds it, the cache is shrunk by evicting the
// least recently used entries.
func (lru *LRUCache) SetCapacity(capacity int64) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	lru.capacity = capacity
	lru.checkCapacity()
}

// Stats returns a few stats on the cache.
func (lru *LRUCache) Stats() (length, size, capacity int64, oldest time.Time) {
	lru.mu.Lock()
	defer lru.mu.Unlock()
	if lastElem := lru.list.Back(); lastElem != nil {
		oldest = lastElem.Value.(*entry).time_accessed
	}
	return int64(lru.list.Len()), lru.size, lru.capacity, oldest
}

// StatsJSON returns stats as a JSON object in a string.
func (lru *LRUCache) StatsJSON() string {
	if lru == nil {
		return "{}"
	}
	l, s, c, o := lru.Stats()
	return fmt.Sprintf("{\"Length\": %v, \"Size\": %v, \"Capacity\": %v, \"OldestAccess\": \"%v\"}", l, s, c, o)
}
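
// For illustration, an empty cache created with NewLRUCache(1024) would
// report roughly (the timestamp is the zero time.Time as rendered by %v):
//
//	{"Length": 0, "Size": 0, "Capacity": 1024, "OldestAccess": "0001-01-01 00:00:00 +0000 UTC"}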

// Length returns how many elements are in the cache.
func (lru *LRUCache) Length() int64 {
	lru.mu.Lock()
	defer lru.mu.Unlock()
	return int64(lru.list.Len())
}

// Size returns the sum of the objects' Size() method.
func (lru *LRUCache) Size() int64 {
	lru.mu.Lock()
	defer lru.mu.Unlock()
	return lru.size
}

// Capacity returns the cache maximum capacity.
func (lru *LRUCache) Capacity() int64 {
	lru.mu.Lock()
	defer lru.mu.Unlock()
	return lru.capacity
}

// Oldest returns the last-access time of the least recently used entry,
// or a zero time (IsZero() == true) if the cache is empty.
func (lru *LRUCache) Oldest() (oldest time.Time) {
	lru.mu.Lock()
	defer lru.mu.Unlock()
	if lastElem := lru.list.Back(); lastElem != nil {
		oldest = lastElem.Value.(*entry).time_accessed
	}
	return
}

// Keys returns all the keys for the cache, ordered from most recently
// used to least recently used.
func (lru *LRUCache) Keys() []string {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	keys := make([]string, 0, lru.list.Len())
	for e := lru.list.Front(); e != nil; e = e.Next() {
		keys = append(keys, e.Value.(*entry).key)
	}
	return keys
}

// Items returns all the values for the cache, ordered from most recently
// used to least recently used.
func (lru *LRUCache) Items() []Item {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	items := make([]Item, 0, lru.list.Len())
	for e := lru.list.Front(); e != nil; e = e.Next() {
		v := e.Value.(*entry)
		items = append(items, Item{Key: v.key, Value: v.value})
	}
	return items
}
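
// exampleSnapshot sketches taking an ordered snapshot of the cache
// (illustrative only; not part of the original API). Keys and Items both
// run from most recently to least recently used.
func exampleSnapshot() {
	c := NewLRUCache(1024)
	c.Set("x", bytesValue("1"))
	c.Set("y", bytesValue("2"))
	fmt.Println(c.Keys()) // [y x]: "y" was touched most recently
	for _, it := range c.Items() {
		fmt.Println(it.Key, string(it.Value.(bytesValue)))
	}
}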

func (lru *LRUCache) updateInplace(element *list.Element, value Value) {
	valueSize := int64(value.Size())
	sizeDiff := valueSize - element.Value.(*entry).size
	element.Value.(*entry).value = value
	element.Value.(*entry).size = valueSize
	lru.size += sizeDiff
	lru.moveToFront(element)
	lru.checkCapacity()
}

func (lru *LRUCache) moveToFront(element *list.Element) {
	lru.list.MoveToFront(element)
	element.Value.(*entry).time_accessed = time.Now()
}

func (lru *LRUCache) addNew(key string, value Value) {
	newEntry := &entry{key, value, int64(value.Size()), time.Now()}
	element := lru.list.PushFront(newEntry)
	lru.table[key] = element
	lru.size += newEntry.size
	lru.checkCapacity()
}

func (lru *LRUCache) checkCapacity() {
	// Partially duplicated from Delete
	for lru.size > lru.capacity {
		delElem := lru.list.Back()
		delValue := delElem.Value.(*entry)
		lru.list.Remove(delElem)
		delete(lru.table, delValue.key)
		lru.size -= delValue.size
	}
}
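
// exampleEviction sketches capacity-driven eviction (illustrative only; not
// part of the original API): with a 10-byte budget, the second insert pushes
// the total size to 12, so the least recently used entry is dropped.
func exampleEviction() {
	c := NewLRUCache(10)
	c.Set("a", bytesValue("123456")) // size 6, fits
	c.Set("b", bytesValue("123456")) // size 12 > 10: "a" (the tail) is evicted
	if _, ok := c.Get("a"); !ok {
		fmt.Println("a was evicted")
	}
}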