-
Notifications
You must be signed in to change notification settings - Fork 0
/
cacher.go
290 lines (248 loc) · 7.77 KB
/
cacher.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
package mlcache
import (
	"bytes"
	"sync"
	"time"

	"github.com/gptankit/mlcache/errs"
)
// Compile-time assertion that *cacher satisfies the Cacher interface.
var _ Cacher = &cacher{}
// CacheStatus reports whether a cache operation succeeded.
type CacheStatus bool
const (
// CacheStatusSuccess indicates the operation completed successfully.
CacheStatusSuccess CacheStatus = true
// CacheStatusFailure indicates the operation did not complete.
CacheStatusFailure CacheStatus = false
)
// Error messages used when wrapping/raising errors from cache operations.
const (
NoWorkableCacheFound errs.ErrorMessage = "No workable cache found"
MaxCacheLevelExceeded errs.ErrorMessage = "Max cache level exceeded"
MaxValLenExceeded errs.ErrorMessage = "Max val len exceeded"
InvalidReadPattern errs.ErrorMessage = "Invalid read pattern"
InvalidWritePattern errs.ErrorMessage = "Invalid write pattern"
GetError errs.ErrorMessage = "Get error"
PutError errs.ErrorMessage = "Put error"
DelError errs.ErrorMessage = "Del error"
IsPresentError errs.ErrorMessage = "IsPresent error"
FlushError errs.ErrorMessage = "Flush error"
)
// ReadPattern selects the strategy Get uses across cache levels.
type ReadPattern uint8
// WritePattern selects the strategy Put uses across cache levels.
type WritePattern uint8
// List of read patterns implemented
const (
// Reads from all caches but backfills first
ReadThrough ReadPattern = iota
// Reads from all caches but backfills later
CacheAside
// End marker, no biggie
endR
)
// List of write patterns implemented
const (
// Writes to all caches in a linear fashion
WriteThrough WritePattern = iota
// Writes to the last cache only
WriteAround
// Writes to the first cache while writing to other caches with slight delay
WriteBack
// End marker, no biggie
endW
)
// Max number of cache levels allowed in the system
const maxCaches uint8 = 5
// Cacher is an interface to be used by concrete cache implementation.
// The multi-level cacher itself also implements Cacher, so levels can nest.
type Cacher interface {
// Get returns the cache item, if present
Get(key *CacheKey) (*bytes.Buffer, time.Time, error)
// Put adds/updates a cache item; expires is an absolute time, not a duration
Put(key *CacheKey, val *bytes.Buffer, expires time.Time) (CacheStatus, error)
// Del deletes the key from the cache
Del(key *CacheKey) (CacheStatus, error)
// IsPresent returns true if the key is present
IsPresent(key *CacheKey) (CacheStatus, error)
// Flush clears all keys from the cache
Flush() error
}
// lCache is a doubly linked list of caches in the system.
// Lower levels (toward prev/head) are faster; higher levels (toward next/tail) are slower.
type lCache struct {
// cur is the cache at this level
cur Cacher
// prev points to the lower (closer to L1) level, nil at the head
prev *lCache
// next points to the higher (closer to Ln) level, nil at the tail
next *lCache
}
// cacher is multi-level cache object; it coordinates reads/writes
// across the linked list of cache levels per the configured patterns.
type cacher struct {
// Head lCache pointer
l1Cache *lCache
// Tail lCache pointer
lnCache *lCache
// Current number of caches
numCaches uint8
// Read pattern to be used while Get
readPattern ReadPattern
// Write pattern to be used while Put
writePattern WritePattern
// Cache value size cutoff
maxValSize int
}
// NewMultiLevelCache creates a new mlcache object.
// It expects 0 < num(caches) <= maxCaches and pre-defined read/write patterns to be passed in.
// The supplied caches are linked L1 (first argument) through Ln (last argument).
func NewMultiLevelCache(readPattern ReadPattern, writePattern WritePattern, maxValSize int, caches ...Cacher) (Cacher, error) {
	numCaches := uint8(len(caches))
	if err := validate(numCaches, readPattern, writePattern); err != nil {
		return nil, err
	}
	// Build the doubly linked list of levels, head (L1) first.
	head := &lCache{cur: caches[0]}
	tail := head
	for _, c := range caches[1:] {
		node := &lCache{cur: c, prev: tail}
		tail.next = node
		tail = node
	}
	return &cacher{
		l1Cache:      head,
		lnCache:      tail,
		numCaches:    numCaches,
		readPattern:  readPattern,
		writePattern: writePattern,
		maxValSize:   maxValSize,
	}, nil
}
// Get executes a cache fetch given a key using pre-selected read pattern.
// On a miss in every level it returns (nil, now, nil); on an internal failure
// the error is wrapped with GetError so callers can distinguish miss from failure.
func (ca *cacher) Get(key *CacheKey) (*bytes.Buffer, time.Time, error) {
	if key == nil {
		return nil, time.Now().UTC(), errs.New(GetError)
	}
	switch ca.readPattern {
	case ReadThrough:
		for this := ca.l1Cache; this != nil; this = this.next {
			val, ttl, _ := this.cur.Get(key) // lookup in a cache
			if val == nil {
				continue
			}
			// Found in a higher level: synchronously backfill all lower levels
			// before returning, so subsequent reads hit closer to L1.
			for lower := this.prev; lower != nil; lower = lower.prev {
				status, err := lower.cur.Put(key, val, ttl)
				if err != nil || status == CacheStatusFailure {
					// Wrap for consistency with Put/Del; err may be nil when
					// only the status signalled failure, and Build covers that.
					return nil, time.Now().UTC(), errs.Build(err, GetError)
				}
			}
			return val, ttl, nil
		}
	case CacheAside:
		for this := ca.l1Cache; this != nil; this = this.next {
			val, ttl, _ := this.cur.Get(key) // lookup in a cache
			if val == nil {
				continue
			}
			// Found in a higher level: return immediately and backfill lower
			// levels asynchronously, best-effort (errors intentionally ignored).
			go func(hit *lCache) {
				for lower := hit.prev; lower != nil; lower = lower.prev {
					lower.cur.Put(key, val, ttl)
				}
			}(this)
			return val, ttl, nil
		}
	}
	// Miss across all levels (or unknown pattern): no value, no error.
	return nil, time.Now().UTC(), nil
}
// Put executes a cache add/update given a key, val and expiry time using pre-selected write pattern.
// It expects an absolute value of time (and not duration).
// Values larger than maxValSize are rejected with MaxValLenExceeded.
func (ca *cacher) Put(key *CacheKey, val *bytes.Buffer, ttl time.Time) (CacheStatus, error) {
	if key == nil {
		return CacheStatusFailure, errs.New(PutError)
	}
	if val != nil && val.Len() > ca.maxValSize {
		return CacheStatusFailure, errs.New(MaxValLenExceeded)
	}
	switch ca.writePattern {
	case WriteThrough:
		// Write to every level L1..Ln in order; fail fast on the first failure.
		for this := ca.l1Cache; this != nil; this = this.next {
			cacheStatus, err := this.cur.Put(key, val, ttl)
			if err != nil || cacheStatus == CacheStatusFailure {
				return CacheStatusFailure, errs.Build(err, PutError)
			}
		}
		return CacheStatusSuccess, nil
	case WriteAround:
		// Write to the last (level n) cache only.
		cacheStatus, err := ca.lnCache.cur.Put(key, val, ttl)
		if err != nil || cacheStatus == CacheStatusFailure {
			return CacheStatusFailure, errs.Build(err, PutError)
		}
		return CacheStatusSuccess, nil
	case WriteBack:
		// Write to the level 1 cache synchronously first.
		head := ca.l1Cache
		cacheStatus, err := head.cur.Put(key, val, ttl)
		if err != nil || cacheStatus == CacheStatusFailure {
			return CacheStatusFailure, errs.Build(err, PutError)
		}
		// Then propagate to the higher levels asynchronously after a short
		// delay; propagation is best-effort and stops at the first failure.
		go func(writeBackDelay time.Duration) {
			time.Sleep(writeBackDelay)
			for upper := head.next; upper != nil; upper = upper.next {
				if status, err := upper.cur.Put(key, val, ttl); err != nil || status == CacheStatusFailure {
					break
				}
			}
		}(200 * time.Millisecond) // write to higher level caches after waiting for this duration
		return CacheStatusSuccess, nil
	}
	return CacheStatusSuccess, nil
}
// Del removes a cache item from every cache level.
// Deletion walks from level n down to level 1 so that a partially
// completed delete never leaves a key only in a faster level.
func (ca *cacher) Del(key *CacheKey) (CacheStatus, error) {
	if key == nil {
		return CacheStatusFailure, errs.New(DelError)
	}
	for node := ca.lnCache; node != nil; node = node.prev {
		if status, err := node.cur.Del(key); err != nil || status == CacheStatusFailure {
			return CacheStatusFailure, errs.Build(err, DelError)
		}
	}
	return CacheStatusSuccess, nil
}
// IsPresent checks if a particular key exists or not.
// It checks only in L1 cache assuming consistency between all cache levels.
// All inconsistencies must be handled using Get/Put methods.
func (ca *cacher) IsPresent(key *CacheKey) (CacheStatus, error) {
	if key == nil {
		return CacheStatusFailure, errs.New(IsPresentError)
	}
	// Consult only the level 1 cache, assuming all levels are in sync.
	status, err := ca.l1Cache.cur.IsPresent(key)
	if err != nil || status == CacheStatusFailure {
		return CacheStatusFailure, errs.Build(err, IsPresentError)
	}
	return CacheStatusSuccess, nil
}
// Flush clears all items from all cache levels.
// Levels are flushed concurrently, but Flush now waits for every level to
// finish and reports the first failure wrapped with FlushError (previously
// the goroutines were fire-and-forget and every error was silently dropped,
// making the error return value meaningless).
func (ca *cacher) Flush() error {
	errCh := make(chan error, ca.numCaches)
	var wg sync.WaitGroup
	for this := ca.l1Cache; this != nil; this = this.next {
		wg.Add(1)
		go func(c Cacher) { // flush each level concurrently
			defer wg.Done()
			errCh <- c.Flush()
		}(this.cur)
	}
	wg.Wait()
	close(errCh)
	for err := range errCh {
		if err != nil {
			return errs.Build(err, FlushError)
		}
	}
	return nil
}