
Commit 3f12ab3

Added support for Otter cache
1 parent c386401 commit 3f12ab3

6 files changed: +245 -7 lines changed

go.mod

Lines changed: 3 additions & 0 deletions
@@ -3,6 +3,7 @@ module github.com/groupcache/groupcache-go/v3
 go 1.21
 
 require (
+    github.com/maypok86/otter v1.2.0
     github.com/segmentio/fasthash v1.0.3
     github.com/stretchr/testify v1.8.1
     golang.org/x/net v0.22.0
@@ -11,6 +12,8 @@
 
 require (
     github.com/davecgh/go-spew v1.1.1 // indirect
+    github.com/dolthub/maphash v0.1.0 // indirect
+    github.com/gammazero/deque v0.2.1 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum

Lines changed: 6 additions & 0 deletions
@@ -1,9 +1,15 @@
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
+github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
+github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0=
+github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/maypok86/otter v1.2.0 h1:djwBBNpp9+dyzBTY0zscIG+pyAQVXRRRMbzztf8iJ4U=
+github.com/maypok86/otter v1.2.0/go.mod h1:mKLfoI7v1HOmQMwFgX4QkRk23mX6ge3RDvjdHOWG4R4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM=

group.go

Lines changed: 3 additions & 3 deletions
@@ -75,7 +75,7 @@ type group struct {
     // authoritative (otherwise they would be in mainCache), but
     // are popular enough to warrant mirroring in this process to
     // avoid going over the network to fetch from a peer. Having
-    // a hotCache avoids network hotspotting, where a peer's
+    // a hotCache avoids network hot spotting, where a peer's
     // network card could become the bottleneck on a popular key.
     // This cache is used sparingly to maximize the total number
     // of key/value pairs that can be stored globally.
@@ -448,9 +448,9 @@ func (g *group) ResetCacheSize(maxBytes int64) {
 
     // Avoid divide by zero
     if maxBytes >= 0 {
-        // Hot cache is one 8th the size of the main cache
+        // Hot cache is 1/8th the size of the main cache
        hotCache = maxBytes / 8
-        mainCache = hotCache * 8
+        mainCache = hotCache * 7
     }
 
     g.mainCache = g.instance.opts.CacheFactory(mainCache)
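
To put concrete numbers on the new split, here is a small standalone sketch of the arithmetic this hunk now performs; the 100,000-byte budget is only an illustrative figure, not anything used by the library itself.

package main

import "fmt"

func main() {
    maxBytes := int64(100_000) // illustrative budget passed to ResetCacheSize

    // After this commit the hot cache takes 1/8th of the requested bytes
    // and the main cache takes the remaining 7/8ths, so the two caches
    // together stay within the requested budget instead of exceeding it.
    hotCache := maxBytes / 8  // 12,500 bytes
    mainCache := hotCache * 7 // 87,500 bytes

    fmt.Println(hotCache, mainCache) // 12500 87500
}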

otter.go

Lines changed: 124 additions & 0 deletions
@@ -0,0 +1,124 @@

package groupcache

import (
    "sync/atomic"
    "time"

    "github.com/groupcache/groupcache-go/v3/transport"
    "github.com/maypok86/otter"
)

type NowFunc func() time.Time

// OtterCache is an alternative cache implementation which uses a high performance lockless
// cache suitable for use in high concurrency environments where mutex contention is an issue.
type OtterCache struct {
    cache    otter.Cache[string, transport.ByteView]
    rejected atomic.Int64
    gets     atomic.Int64

    // Now is the Now() function the cache will use to determine
    // the current time, which is used to calculate expired values.
    // Defaults to time.Now()
    Now NowFunc
}

// NewOtterCache instantiates a new cache instance
//
// Due to the algorithm otter uses to evict and track cache item costs, it is recommended to
// use a larger maximum byte size when creating Groups via Instance.NewGroup() when using
// OtterCache if you expect your cached items to be very large. This is because groupcache
// uses a "Main Cache" and a "Hot Cache" system where the "Hot Cache" is 1/8th the size of
// the maximum bytes requested. Because Otter may reject items added to the cache
// which are larger than 1/10th of the total capacity of the "Hot Cache", this may result in
// a lower hit rate for the "Hot Cache" when storing large cache items and penalize the
// efficiency of groupcache operation.
//
// For example:
// If you expect the average item in cache to be 100 bytes, and you create a Group with a cache size
// of 100,000 bytes, then the main cache will be 87,500 bytes and the hot cache will be 12,500 bytes.
// Since the largest possible item in an otter cache is 1/10th of the total size of the cache, the
// largest item that could possibly fit into the hot cache is 1,250 bytes. If you think any of the
// items you store in groupcache could be larger than 1,250 bytes, then you should increase the
// maximum bytes in a Group to accommodate the largest cache item. If you have no estimate of the
// maximum size of items in the groupcache, then you should monitor the `Cache.Stats().Rejected`
// stat for the cache in production and adjust the size accordingly.
func NewOtterCache(maxBytes int64) (*OtterCache, error) {
    o := &OtterCache{
        Now: time.Now,
    }

    var err error
    o.cache, err = otter.MustBuilder[string, transport.ByteView](int(maxBytes)).
        CollectStats().
        Cost(func(key string, value transport.ByteView) uint32 {
            return uint32(value.Len())
        }).
        Build()
    return o, err
}

// Get returns the item from the cache
func (o *OtterCache) Get(key string) (transport.ByteView, bool) {
    i, ok := o.cache.Get(key)

    // We don't use otter's TTL as it is universal to every item
    // in the cache and groupcache allows users to set a TTL per
    // item stored.
    if !i.Expire().IsZero() && i.Expire().Before(o.Now()) {
        o.cache.Delete(key)
        return transport.ByteView{}, false
    }
    o.gets.Add(1)
    return i, ok
}

// Add adds the item to the cache. However, otter has the side effect
// of rejecting an item if the item's size (aka, cost) is larger than
// the capacity (max cost) of the cache divided by 10.
//
// If Stats() reports a high number of Rejected items due to large
// cached items exceeding the maximum cost of the "Hot Cache", then you
// should increase the size of the cache such that no cache item is
// larger than the total size of the cache divided by 10.
//
// See s3fifo/policy.go NewPolicy() for details
func (o *OtterCache) Add(key string, value transport.ByteView) {
    if ok := o.cache.Set(key, value); !ok {
        o.rejected.Add(1)
    }
}

func (o *OtterCache) Remove(key string) {
    o.cache.Delete(key)
}

func (o *OtterCache) Stats() CacheStats {
    s := o.cache.Stats()
    return CacheStats{
        Bytes:     int64(o.cache.Capacity()),
        Items:     int64(o.cache.Size()),
        Rejected:  o.rejected.Load(),
        Evictions: s.EvictedCount(),
        Gets:      o.gets.Load(),
        Hits:      s.Hits(),
    }
}

// Bytes always returns 0 bytes used. Otter does not keep track of total bytes,
// and it is impractical for us to attempt to keep track of total bytes in the
// cache. Tracking the size of Add and Eviction is easy. However, we must also
// adjust the total bytes count when items with the same key are replaced.
// Doing so is more computationally expensive as we must check the cache for an
// existing item, subtract the existing byte count, then add the new byte count
// of the replacing item.
//
// Arguably, reporting the total bytes used is not as useful as hit ratio
// in a production environment.
func (o *OtterCache) Bytes() int64 {
    return 0
}

func (o *OtterCache) Close() {
    o.cache.Close()
}
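
A minimal sketch of exercising the new cache directly, using only the methods added above; the key, value, and 20,000-byte capacity are made-up illustration values (the same capacity the tests below use).

package main

import (
    "fmt"
    "time"

    "github.com/groupcache/groupcache-go/v3"
    "github.com/groupcache/groupcache-go/v3/transport"
)

func main() {
    // 20,000 bytes of capacity; otter rejects any single item whose
    // cost exceeds 1/10th of this (2,000 bytes).
    cache, err := groupcache.NewOtterCache(20_000)
    if err != nil {
        panic(err)
    }
    defer cache.Close()

    // Store a value with no per-item expiry (the zero time.Time).
    cache.Add("greeting", transport.ByteViewWithExpire([]byte("hello"), time.Time{}))

    if v, ok := cache.Get("greeting"); ok {
        fmt.Println(v.String()) // "hello"
    }

    // Rejected counts items otter refused because their cost was too
    // large for the configured capacity; the doc comment above suggests
    // watching this stat in production to decide if the cache should grow.
    fmt.Println("rejected so far:", cache.Stats().Rejected)
}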

otter_test.go

Lines changed: 97 additions & 0 deletions
@@ -0,0 +1,97 @@

package groupcache_test

import (
    "crypto/rand"
    "testing"
    "time"

    "github.com/groupcache/groupcache-go/v3"
    "github.com/groupcache/groupcache-go/v3/transport"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestOtterCrud(t *testing.T) {
    c, err := groupcache.NewOtterCache(20_000)
    require.NoError(t, err)

    c.Add("key1", transport.ByteViewWithExpire([]byte("value1"), time.Time{}))

    v, ok := c.Get("key1")
    assert.True(t, ok)
    assert.Equal(t, "value1", v.String())
    assert.Equal(t, int64(1), c.Stats().Hits)
    assert.Equal(t, int64(1), c.Stats().Gets)
    assert.Equal(t, int64(1), c.Stats().Items)

    // This item should be rejected by otter as its "cost" is too high
    c.Add("too-large", transport.ByteViewWithExpire(randomValue((20_000/10)+1), time.Time{}))
    assert.Equal(t, int64(1), c.Stats().Rejected)
    assert.Equal(t, int64(1), c.Stats().Items)

    c.Remove("key1")
    assert.Equal(t, int64(1), c.Stats().Hits)
    assert.Equal(t, int64(1), c.Stats().Gets)
    assert.Equal(t, int64(0), c.Stats().Items)
}

func TestOtterEnsureUpdateExpiredValue(t *testing.T) {
    c, err := groupcache.NewOtterCache(20_000)
    require.NoError(t, err)
    curTime := time.Now()

    // Override the now function so we control time
    c.Now = func() time.Time {
        return curTime
    }

    // Expires in 1 second
    c.Add("key1", transport.ByteViewWithExpire([]byte("value1"), curTime.Add(time.Second)))
    _, ok := c.Get("key1")
    assert.True(t, ok)

    // Advance 1.1 seconds into the future
    curTime = curTime.Add(time.Millisecond * 1100)

    // Value should have expired
    _, ok = c.Get("key1")
    assert.False(t, ok)

    // Add a new key that expires in 1 second
    c.Add("key2", transport.ByteViewWithExpire([]byte("value2"), curTime.Add(time.Second)))
    _, ok = c.Get("key2")
    assert.True(t, ok)

    // Advance 0.5 seconds into the future
    curTime = curTime.Add(time.Millisecond * 500)

    // Value should still exist
    _, ok = c.Get("key2")
    assert.True(t, ok)

    // Replace the existing key, this should update the expired time
    c.Add("key2", transport.ByteViewWithExpire([]byte("updated value2"), curTime.Add(time.Second)))
    _, ok = c.Get("key2")
    assert.True(t, ok)

    // Advance 0.6 seconds into the future, which puts us past the initial
    // expired time for key2.
    curTime = curTime.Add(time.Millisecond * 600)

    // Should still exist
    _, ok = c.Get("key2")
    assert.True(t, ok)

    // Advance 1.1 seconds into the future
    curTime = curTime.Add(time.Millisecond * 1100)

    // Should not exist
    _, ok = c.Get("key2")
    assert.False(t, ok)
}

func randomValue(length int) []byte {
    bytes := make([]byte, length)
    _, _ = rand.Read(bytes)
    return bytes
}

stats.go

Lines changed: 12 additions & 4 deletions
@@ -46,10 +46,18 @@ func (i *AtomicInt) String() string {
 
 // CacheStats are returned by stats accessors on Group.
 type CacheStats struct {
-    Bytes     int64
-    Items     int64
-    Gets      int64
-    Hits      int64
+    // Rejected is a counter of the total number of items that were not added to
+    // the cache due to some consideration of the underlying cache implementation.
+    Rejected int64
+    // Bytes is a gauge of how many bytes are in the cache
+    Bytes int64
+    // Items is a gauge of how many items are in the cache
+    Items int64
+    // Gets reports the total get requests
+    Gets int64
+    // Hits reports the total successful cache hits
+    Hits int64
+    // Evictions reports the total number of evictions
     Evictions int64
 }
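
With Gets and Hits now documented as separate counters, a consumer could derive a hit ratio from the stats; hitRatio below is a hypothetical helper shown for illustration, not part of this commit.

package main

import (
    "fmt"

    "github.com/groupcache/groupcache-go/v3"
)

// hitRatio is a hypothetical helper (not part of this commit): the fraction
// of Get requests that were served from the cache.
func hitRatio(s groupcache.CacheStats) float64 {
    if s.Gets == 0 {
        return 0
    }
    return float64(s.Hits) / float64(s.Gets)
}

func main() {
    cache, err := groupcache.NewOtterCache(20_000)
    if err != nil {
        panic(err)
    }
    defer cache.Close()

    fmt.Printf("hit ratio: %.2f\n", hitRatio(cache.Stats()))
}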
