From 6fb175b69f1a27b8f70ab6caf55e5c225840409c Mon Sep 17 00:00:00 2001 From: Sotirios Mantziaris Date: Sat, 16 Nov 2024 15:15:08 +0200 Subject: [PATCH 1/2] LRU upgrade --- cache/lru/lru.go | 53 +++- cache/lru/lru_test.go | 46 +++- cache/metric.go | 18 +- go.mod | 2 +- go.sum | 4 +- .../hashicorp/golang-lru/.golangci.yml | 30 -- .../github.com/hashicorp/golang-lru/README.md | 7 - vendor/github.com/hashicorp/golang-lru/arc.go | 256 ------------------ vendor/github.com/hashicorp/golang-lru/doc.go | 21 -- .../hashicorp/golang-lru/testing.go | 16 -- .../hashicorp/golang-lru/{ => v2}/.gitignore | 0 .../hashicorp/golang-lru/v2/.golangci.yml | 46 ++++ .../hashicorp/golang-lru/{ => v2}/2q.go | 103 +++++-- .../hashicorp/golang-lru/{ => v2}/LICENSE | 0 .../hashicorp/golang-lru/v2/README.md | 79 ++++++ .../github.com/hashicorp/golang-lru/v2/doc.go | 24 ++ .../hashicorp/golang-lru/v2/internal/list.go | 142 ++++++++++ .../hashicorp/golang-lru/{ => v2}/lru.go | 89 +++--- .../golang-lru/v2/simplelru/LICENSE_list | 29 ++ .../golang-lru/{ => v2}/simplelru/lru.go | 122 ++++----- .../{ => v2}/simplelru/lru_interface.go | 24 +- vendor/modules.txt | 9 +- 22 files changed, 626 insertions(+), 494 deletions(-) delete mode 100644 vendor/github.com/hashicorp/golang-lru/.golangci.yml delete mode 100644 vendor/github.com/hashicorp/golang-lru/README.md delete mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/testing.go rename vendor/github.com/hashicorp/golang-lru/{ => v2}/.gitignore (100%) create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml rename vendor/github.com/hashicorp/golang-lru/{ => v2}/2q.go (64%) rename vendor/github.com/hashicorp/golang-lru/{ => v2}/LICENSE (100%) create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/internal/list.go rename vendor/github.com/hashicorp/golang-lru/{ => v2}/lru.go (72%) create mode 100644 vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list rename vendor/github.com/hashicorp/golang-lru/{ => v2}/simplelru/lru.go (50%) rename vendor/github.com/hashicorp/golang-lru/{ => v2}/simplelru/lru_interface.go (68%) diff --git a/cache/lru/lru.go b/cache/lru/lru.go index f909454dfd..6daa947b9f 100644 --- a/cache/lru/lru.go +++ b/cache/lru/lru.go @@ -5,60 +5,83 @@ import ( "context" "github.com/beatlabs/patron/cache" - lru "github.com/hashicorp/golang-lru" + lru "github.com/hashicorp/golang-lru/v2" "go.opentelemetry.io/otel/attribute" ) var ( - _ cache.Cache = &Cache{} - lruAttribute = attribute.String("cache.type", "lru") + lruAttribute = attribute.String("cache.type", "lru") + lruEvictAttribute = attribute.String("cache.type", "lru-evict") ) // Cache encapsulates a thread-safe fixed size LRU cache. -type Cache struct { - cache *lru.Cache +type Cache[k comparable, v any] struct { + cache *lru.Cache[k, v] useCaseAttribute attribute.KeyValue + typeAttribute attribute.KeyValue } // New returns a new LRU cache that can hold 'size' number of keys at a time. 
-func New(size int, useCase string) (*Cache, error) { +func New[k comparable, v any](size int, useCase string) (*Cache[k, v], error) { cache.SetupMetricsOnce() - chc, err := lru.New(size) + chc, err := lru.New[k, v](size) if err != nil { return nil, err } - return &Cache{ + return newFunction(chc, lruAttribute, cache.UseCaseAttribute(useCase)) +} + +// NewWithEvict returns a new LRU cache that can hold 'size' number of keys at a time. +func NewWithEvict[k comparable, v any](size int, useCase string, onEvict func(k, v)) (*Cache[k, v], error) { + cache.SetupMetricsOnce() + + chc, err := lru.NewWithEvict[k, v](size, func(key k, value v) { + onEvict(key, value) + cache.ObserveEviction(context.Background(), lruEvictAttribute, cache.UseCaseAttribute(useCase)) + }) + if err != nil { + return nil, err + } + + return newFunction(chc, lruEvictAttribute, cache.UseCaseAttribute(useCase)) +} + +func newFunction[k comparable, v any](chc *lru.Cache[k, v], typeAttr attribute.KeyValue, + useCaseAttr attribute.KeyValue, +) (*Cache[k, v], error) { + return &Cache[k, v]{ cache: chc, - useCaseAttribute: cache.UseCaseAttribute(useCase), + typeAttribute: typeAttr, + useCaseAttribute: useCaseAttr, }, nil } // Get executes a lookup and returns whether a key exists in the cache along with its value. -func (c *Cache) Get(ctx context.Context, key string) (interface{}, bool, error) { +func (c *Cache[k, v]) Get(ctx context.Context, key k) (interface{}, bool, error) { value, ok := c.cache.Get(key) if !ok { - cache.ObserveMiss(ctx, lruAttribute, c.useCaseAttribute) + cache.ObserveMiss(ctx, lruAttribute, c.useCaseAttribute, c.typeAttribute) return nil, false, nil } - cache.ObserveHit(ctx, lruAttribute, c.useCaseAttribute) + cache.ObserveHit(ctx, lruAttribute, c.useCaseAttribute, c.typeAttribute) return value, true, nil } // Purge evicts all keys present in the cache. -func (c *Cache) Purge(_ context.Context) error { +func (c *Cache[k, v]) Purge(_ context.Context) error { c.cache.Purge() return nil } // Remove evicts a specific key from the cache. -func (c *Cache) Remove(_ context.Context, key string) error { +func (c *Cache[k, v]) Remove(_ context.Context, key k) error { c.cache.Remove(key) return nil } // Set registers a key-value pair to the cache. 
-func (c *Cache) Set(_ context.Context, key string, value interface{}) error { +func (c *Cache[k, v]) Set(_ context.Context, key k, value v) error { c.cache.Add(key, value) return nil } diff --git a/cache/lru/lru_test.go b/cache/lru/lru_test.go index 67de018821..34e50db4a3 100644 --- a/cache/lru/lru_test.go +++ b/cache/lru/lru_test.go @@ -21,7 +21,32 @@ func TestNew(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { - c, err := New(tt.size, "test") + c, err := New[string, string](tt.size, "test") + if tt.wantErr { + assert.Nil(t, c) + assert.EqualError(t, err, tt.err) + } else { + assert.NotNil(t, c) + require.NoError(t, err) + } + }) + } +} + +func TestNewWithEvict(t *testing.T) { + tests := map[string]struct { + err string + size int + wantErr bool + }{ + "negative size": {size: -1, wantErr: true, err: "must provide a positive size"}, + "zero size": {size: 0, wantErr: true, err: "must provide a positive size"}, + "positive size": {size: 1024, wantErr: false}, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + c, err := NewWithEvict[string, string](tt.size, "test", func(k, v string) {}) if tt.wantErr { assert.Nil(t, c) assert.EqualError(t, err, tt.err) @@ -34,7 +59,7 @@ func TestNew(t *testing.T) { } func TestCacheOperations(t *testing.T) { - c, err := New(10, "test") + c, err := NewWithEvict[string, string](10, "test", func(k, v string) {}) assert.NotNil(t, c) require.NoError(t, err) @@ -80,3 +105,20 @@ func TestCacheOperations(t *testing.T) { assert.Equal(t, 0, c.cache.Len()) }) } + +func BenchmarkCache(b *testing.B) { + c, err := NewWithEvict[int, int](b.N, "test", func(k, v int) {}) + require.NoError(b, err) + + ctx := context.Background() + for i := 0; i < b.N; i++ { + err = c.Set(ctx, i, i) + require.NoError(b, err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err = c.Get(ctx, i) + require.NoError(b, err) + } +} diff --git a/cache/metric.go b/cache/metric.go index 51eda22701..ccbb5ab93e 100644 --- a/cache/metric.go +++ b/cache/metric.go @@ -12,10 +12,11 @@ import ( const packageName = "cache" var ( - cashHitAttribute = attribute.String("cache.status", "hit") - cashMissAttribute = attribute.String("cache.status", "miss") - cacheCounter metric.Int64Counter - cacheOnce sync.Once + cacheHitAttribute = attribute.String("cache.status", "hit") + cacheMissAttribute = attribute.String("cache.status", "miss") + cacheEvictAttribute = attribute.String("cache.status", "evict") + cacheCounter metric.Int64Counter + cacheOnce sync.Once ) // SetupMetricsOnce initializes the cache counter. @@ -32,12 +33,17 @@ func UseCaseAttribute(useCase string) attribute.KeyValue { // ObserveHit increments the cache hit counter. func ObserveHit(ctx context.Context, attrs ...attribute.KeyValue) { - attrs = append(attrs, cashHitAttribute) + attrs = append(attrs, cacheHitAttribute) cacheCounter.Add(ctx, 1, metric.WithAttributes(attrs...)) } // ObserveMiss increments the cache miss counter. 
func ObserveMiss(ctx context.Context, attrs ...attribute.KeyValue) { - attrs = append(attrs, cashMissAttribute) + attrs = append(attrs, cacheMissAttribute) + cacheCounter.Add(ctx, 1, metric.WithAttributes(attrs...)) +} + +func ObserveEviction(ctx context.Context, attrs ...attribute.KeyValue) { + attrs = append(attrs, cacheEvictAttribute) cacheCounter.Add(ctx, 1, metric.WithAttributes(attrs...)) } diff --git a/go.mod b/go.mod index 861a2a27ef..8037b5bd6f 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/elastic/go-elasticsearch/v8 v8.16.0 github.com/go-sql-driver/mysql v1.8.1 github.com/google/uuid v1.6.0 - github.com/hashicorp/golang-lru v1.0.2 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/rabbitmq/amqp091-go v1.10.0 github.com/redis/go-redis/extra/redisotel/v9 v9.7.0 github.com/redis/go-redis/v9 v9.7.0 diff --git a/go.sum b/go.sum index 1cc80c9016..9ade2bb7e8 100644 --- a/go.sum +++ b/go.sum @@ -94,8 +94,8 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= diff --git a/vendor/github.com/hashicorp/golang-lru/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/.golangci.yml deleted file mode 100644 index 49202fc41e..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters: - enable: - - megacheck - - revive - - govet - - unconvert - - megacheck - - gas - - gocyclo - - dupl - - misspell - - unparam - - unused - - typecheck - - ineffassign - - stylecheck - - exportloopref - - gocritic - - nakedret - - gosimple - - prealloc - fast: false - disable-all: true - -issues: - exclude-rules: - - path: _test\.go - linters: - - dupl - exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md deleted file mode 100644 index 03bcfb5b76..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/README.md +++ /dev/null @@ -1,7 +0,0 @@ -golang-lru -========== - -Please upgrade to github.com/hashicorp/golang-lru/v2 for all new code as v1 will -not be updated anymore. The v2 version supports generics and is faster; old code -can specify a specific tag, e.g. github.com/hashicorp/golang-lru/v1.0.2 for -backwards compatibility. diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go deleted file mode 100644 index e396f8428a..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ /dev/null @@ -1,256 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). 
-// ARC is an enhancement over the standard LRU cache in that tracks both -// frequency and recency of use. This avoids a burst in access to new -// entries from evicting the frequently used older entries. It adds some -// additional tracking overhead to a standard LRU cache, computationally -// it is roughly 2x the cost, and the extra memory overhead is linear -// with the size of the cache. ARC has been patented by IBM, but is -// similar to the TwoQueueCache (2Q) which requires setting parameters. -type ARCCache struct { - size int // Size is the total capacity of the cache - p int // P is the dynamic preference towards T1 or T2 - - t1 simplelru.LRUCache // T1 is the LRU for recently accessed items - b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 - - t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items - b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 - - lock sync.RWMutex -} - -// NewARC creates an ARC of the given size -func NewARC(size int) (*ARCCache, error) { - // Create the sub LRUs - b1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - b2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - - // Initialize the ARC - c := &ARCCache{ - size: size, - p: 0, - t1: t1, - b1: b1, - t2: t2, - b2: b2, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // If the value is contained in T1 (recent), then - // promote it to T2 (frequent) - if val, ok := c.t1.Peek(key); ok { - c.t1.Remove(key) - c.t2.Add(key, val) - return val, ok - } - - // Check if the value is contained in T2 (frequent) - if val, ok := c.t2.Get(key); ok { - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. 
-func (c *ARCCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is contained in T1 (recent), and potentially - // promote it to frequent T2 - if c.t1.Contains(key) { - c.t1.Remove(key) - c.t2.Add(key, value) - return - } - - // Check if the value is already in T2 (frequent) and update it - if c.t2.Contains(key) { - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // recently used list - if c.b1.Contains(key) { - // T1 set is too small, increase P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b2Len > b1Len { - delta = b2Len / b1Len - } - if c.p+delta >= c.size { - c.p = c.size - } else { - c.p += delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Remove from B1 - c.b1.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // frequently used list - if c.b2.Contains(key) { - // T2 set is too small, decrease P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b1Len > b2Len { - delta = b1Len / b2Len - } - if delta >= c.p { - c.p = 0 - } else { - c.p -= delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(true) - } - - // Remove from B2 - c.b2.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Keep the size of the ghost buffers trim - if c.b1.Len() > c.size-c.p { - c.b1.RemoveOldest() - } - if c.b2.Len() > c.p { - c.b2.RemoveOldest() - } - - // Add to the recently seen list - c.t1.Add(key, value) -} - -// replace is used to adaptively evict from either T1 or T2 -// based on the current learned value of P -func (c *ARCCache) replace(b2ContainsKey bool) { - t1Len := c.t1.Len() - if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { - k, _, ok := c.t1.RemoveOldest() - if ok { - c.b1.Add(k, nil) - } - } else { - k, _, ok := c.t2.RemoveOldest() - if ok { - c.b2.Add(k, nil) - } - } -} - -// Len returns the number of cached entries -func (c *ARCCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Len() + c.t2.Len() -} - -// Keys returns all the cached keys -func (c *ARCCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.t1.Keys() - k2 := c.t2.Keys() - return append(k1, k2...) -} - -// Remove is used to purge a key from the cache -func (c *ARCCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.t1.Remove(key) { - return - } - if c.t2.Remove(key) { - return - } - if c.b1.Remove(key) { - return - } - if c.b2.Remove(key) { - return - } -} - -// Purge is used to clear the cache -func (c *ARCCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.t1.Purge() - c.t2.Purge() - c.b1.Purge() - c.b2.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *ARCCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Contains(key) || c.t2.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. 
-func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.t1.Peek(key); ok { - return val, ok - } - return c.t2.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go deleted file mode 100644 index 2547df979d..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package lru provides three different LRU caches of varying sophistication. -// -// Cache is a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru -// -// TwoQueueCache tracks frequently used and recently used entries separately. -// This avoids a burst of accesses from taking out frequently used entries, -// at the cost of about 2x computational overhead and some extra bookkeeping. -// -// ARCCache is an adaptive replacement cache. It tracks recent evictions as -// well as recent usage in both the frequent and recent caches. Its -// computational overhead is comparable to TwoQueueCache, but the memory -// overhead is linear with the size of the cache. -// -// ARC has been patented by IBM, so do not use it if that is problematic for -// your program. -// -// All caches in this package take locks while operating, and are therefore -// thread-safe for consumers. -package lru diff --git a/vendor/github.com/hashicorp/golang-lru/testing.go b/vendor/github.com/hashicorp/golang-lru/testing.go deleted file mode 100644 index 492760782c..0000000000 --- a/vendor/github.com/hashicorp/golang-lru/testing.go +++ /dev/null @@ -1,16 +0,0 @@ -package lru - -import ( - "crypto/rand" - "math" - "math/big" - "testing" -) - -func getRand(tb testing.TB) int64 { - out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) - if err != nil { - tb.Fatal(err) - } - return out.Int64() -} diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/v2/.gitignore similarity index 100% rename from vendor/github.com/hashicorp/golang-lru/.gitignore rename to vendor/github.com/hashicorp/golang-lru/v2/.gitignore diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml new file mode 100644 index 0000000000..7e7b8a9627 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml @@ -0,0 +1,46 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +linters: + fast: false + disable-all: true + enable: + - revive + - megacheck + - govet + - unconvert + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + # - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + +# golangci-lint configuration file +linters-settings: + revive: + ignore-generated-header: true + severity: warning + rules: + - name: package-comments + severity: warning + disabled: true + - name: exported + severity: warning + disabled: false + arguments: ["checkPrivateReceivers", "disableStutteringCheck"] + +issues: + exclude-use-default: false + exclude-rules: + - path: _test\.go + linters: + - dupl diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/v2/2q.go similarity index 64% rename from vendor/github.com/hashicorp/golang-lru/2q.go rename to vendor/github.com/hashicorp/golang-lru/v2/2q.go index 15fcad0306..8c95252b6f 100644 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/2q.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package lru import ( - "fmt" + "errors" "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" ) const ( @@ -26,33 +29,35 @@ const ( // computationally about 2x the cost, and adds some metadata over // head. The ARCCache is similar, but does not require setting any // parameters. -type TwoQueueCache struct { - size int - recentSize int +type TwoQueueCache[K comparable, V any] struct { + size int + recentSize int + recentRatio float64 + ghostRatio float64 - recent simplelru.LRUCache - frequent simplelru.LRUCache - recentEvict simplelru.LRUCache + recent simplelru.LRUCache[K, V] + frequent simplelru.LRUCache[K, V] + recentEvict simplelru.LRUCache[K, struct{}] lock sync.RWMutex } // New2Q creates a new TwoQueueCache using the default // values for the parameters. -func New2Q(size int) (*TwoQueueCache, error) { - return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +func New2Q[K comparable, V any](size int) (*TwoQueueCache[K, V], error) { + return New2QParams[K, V](size, Default2QRecentRatio, Default2QGhostEntries) } // New2QParams creates a new TwoQueueCache using the provided // parameter values. 
-func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) { +func New2QParams[K comparable, V any](size int, recentRatio, ghostRatio float64) (*TwoQueueCache[K, V], error) { if size <= 0 { - return nil, fmt.Errorf("invalid size") + return nil, errors.New("invalid size") } if recentRatio < 0.0 || recentRatio > 1.0 { - return nil, fmt.Errorf("invalid recent ratio") + return nil, errors.New("invalid recent ratio") } if ghostRatio < 0.0 || ghostRatio > 1.0 { - return nil, fmt.Errorf("invalid ghost ratio") + return nil, errors.New("invalid ghost ratio") } // Determine the sub-sizes @@ -60,23 +65,25 @@ func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, err evictSize := int(float64(size) * ghostRatio) // Allocate the LRUs - recent, err := simplelru.NewLRU(size, nil) + recent, err := simplelru.NewLRU[K, V](size, nil) if err != nil { return nil, err } - frequent, err := simplelru.NewLRU(size, nil) + frequent, err := simplelru.NewLRU[K, V](size, nil) if err != nil { return nil, err } - recentEvict, err := simplelru.NewLRU(evictSize, nil) + recentEvict, err := simplelru.NewLRU[K, struct{}](evictSize, nil) if err != nil { return nil, err } // Initialize the cache - c := &TwoQueueCache{ + c := &TwoQueueCache[K, V]{ size: size, recentSize: recentSize, + recentRatio: recentRatio, + ghostRatio: ghostRatio, recent: recent, frequent: frequent, recentEvict: recentEvict, @@ -85,7 +92,7 @@ func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, err } // Get looks up a key's value from the cache. -func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { +func (c *TwoQueueCache[K, V]) Get(key K) (value V, ok bool) { c.lock.Lock() defer c.lock.Unlock() @@ -103,11 +110,11 @@ func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { } // No hit - return nil, false + return } // Add adds a value to the cache. -func (c *TwoQueueCache) Add(key, value interface{}) { +func (c *TwoQueueCache[K, V]) Add(key K, value V) { c.lock.Lock() defer c.lock.Unlock() @@ -141,7 +148,7 @@ func (c *TwoQueueCache) Add(key, value interface{}) { } // ensureSpace is used to ensure we have space in the cache -func (c *TwoQueueCache) ensureSpace(recentEvict bool) { +func (c *TwoQueueCache[K, V]) ensureSpace(recentEvict bool) { // If we have space, nothing to do recentLen := c.recent.Len() freqLen := c.frequent.Len() @@ -153,7 +160,7 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { // the target, evict from there if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { k, _, _ := c.recent.RemoveOldest() - c.recentEvict.Add(k, nil) + c.recentEvict.Add(k, struct{}{}) return } @@ -162,15 +169,43 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) { } // Len returns the number of items in the cache. -func (c *TwoQueueCache) Len() int { +func (c *TwoQueueCache[K, V]) Len() int { c.lock.RLock() defer c.lock.RUnlock() return c.recent.Len() + c.frequent.Len() } +// Resize changes the cache size. 
+func (c *TwoQueueCache[K, V]) Resize(size int) (evicted int) { + c.lock.Lock() + defer c.lock.Unlock() + + // Recalculate the sub-sizes + recentSize := int(float64(size) * c.recentRatio) + evictSize := int(float64(size) * c.ghostRatio) + c.size = size + c.recentSize = recentSize + + // ensureSpace + diff := c.recent.Len() + c.frequent.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.ensureSpace(true) + } + + // Reallocate the LRUs + c.recent.Resize(size) + c.frequent.Resize(size) + c.recentEvict.Resize(evictSize) + + return diff +} + // Keys returns a slice of the keys in the cache. // The frequently used keys are first in the returned slice. -func (c *TwoQueueCache) Keys() []interface{} { +func (c *TwoQueueCache[K, V]) Keys() []K { c.lock.RLock() defer c.lock.RUnlock() k1 := c.frequent.Keys() @@ -178,8 +213,18 @@ func (c *TwoQueueCache) Keys() []interface{} { return append(k1, k2...) } +// Values returns a slice of the values in the cache. +// The frequently used values are first in the returned slice. +func (c *TwoQueueCache[K, V]) Values() []V { + c.lock.RLock() + defer c.lock.RUnlock() + v1 := c.frequent.Values() + v2 := c.recent.Values() + return append(v1, v2...) +} + // Remove removes the provided key from the cache. -func (c *TwoQueueCache) Remove(key interface{}) { +func (c *TwoQueueCache[K, V]) Remove(key K) { c.lock.Lock() defer c.lock.Unlock() if c.frequent.Remove(key) { @@ -194,7 +239,7 @@ func (c *TwoQueueCache) Remove(key interface{}) { } // Purge is used to completely clear the cache. -func (c *TwoQueueCache) Purge() { +func (c *TwoQueueCache[K, V]) Purge() { c.lock.Lock() defer c.lock.Unlock() c.recent.Purge() @@ -204,7 +249,7 @@ func (c *TwoQueueCache) Purge() { // Contains is used to check if the cache contains a key // without updating recency or frequency. -func (c *TwoQueueCache) Contains(key interface{}) bool { +func (c *TwoQueueCache[K, V]) Contains(key K) bool { c.lock.RLock() defer c.lock.RUnlock() return c.frequent.Contains(key) || c.recent.Contains(key) @@ -212,7 +257,7 @@ func (c *TwoQueueCache) Contains(key interface{}) bool { // Peek is used to inspect the cache value of a key // without updating recency or frequency. -func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { +func (c *TwoQueueCache[K, V]) Peek(key K) (value V, ok bool) { c.lock.RLock() defer c.lock.RUnlock() if val, ok := c.frequent.Peek(key); ok { diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/v2/LICENSE similarity index 100% rename from vendor/github.com/hashicorp/golang-lru/LICENSE rename to vendor/github.com/hashicorp/golang-lru/v2/LICENSE diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md new file mode 100644 index 0000000000..a942eb5397 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md @@ -0,0 +1,79 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. 
+ +Documentation +============= + +Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2) + +LRU cache example +================= + +```go +package main + +import ( + "fmt" + "github.com/hashicorp/golang-lru/v2" +) + +func main() { + l, _ := lru.New[int, any](128) + for i := 0; i < 256; i++ { + l.Add(i, nil) + } + if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) + } +} +``` + +Expirable LRU cache example +=========================== + +```go +package main + +import ( + "fmt" + "time" + + "github.com/hashicorp/golang-lru/v2/expirable" +) + +func main() { + // make cache with 10ms TTL and 5 max keys + cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10) + + + // set value under key1. + cache.Add("key1", "val1") + + // get value under key1 + r, ok := cache.Get("key1") + + // check for OK value + if ok { + fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r) + } + + // wait for cache to expire + time.Sleep(time.Millisecond * 12) + + // get value under key1 after key expiration + r, ok = cache.Get("key1") + fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r) + + // set value under key2, would evict old entry because it is already expired. + cache.Add("key2", "val2") + + fmt.Printf("Cache len: %d\n", cache.Len()) + // Output: + // value before expiration is found: true, value: "val1" + // value after expiration is found: false, value: "" + // Cache len: 1 +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/v2/doc.go b/vendor/github.com/hashicorp/golang-lru/v2/doc.go new file mode 100644 index 0000000000..24107ee0ed --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the LRU implementation in +// groupcache: https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, at +// the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as well +// as recent usage in both the frequent and recent caches. Its computational +// overhead is comparable to TwoQueueCache, but the memory overhead is linear +// with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. For this reason, it is in a separate go module contained within +// this repository. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 0000000000..5cd74a0343 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. 
+ // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). +func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. 
+func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/lru.go similarity index 72% rename from vendor/github.com/hashicorp/golang-lru/lru.go rename to vendor/github.com/hashicorp/golang-lru/v2/lru.go index 895d8e3ea0..a2655f1f31 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/lru.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package lru import ( "sync" - "github.com/hashicorp/golang-lru/simplelru" + "github.com/hashicorp/golang-lru/v2/simplelru" ) const ( @@ -12,23 +15,24 @@ const ( ) // Cache is a thread-safe fixed size LRU cache. -type Cache struct { - lru *simplelru.LRU - evictedKeys, evictedVals []interface{} - onEvictedCB func(k, v interface{}) - lock sync.RWMutex +type Cache[K comparable, V any] struct { + lru *simplelru.LRU[K, V] + evictedKeys []K + evictedVals []V + onEvictedCB func(k K, v V) + lock sync.RWMutex } // New creates an LRU of the given size. -func New(size int) (*Cache, error) { - return NewWithEvict(size, nil) +func New[K comparable, V any](size int) (*Cache[K, V], error) { + return NewWithEvict[K, V](size, nil) } // NewWithEvict constructs a fixed size cache with the given eviction // callback. -func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) { +func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) { // create a cache with default settings - c = &Cache{ + c = &Cache[K, V]{ onEvictedCB: onEvicted, } if onEvicted != nil { @@ -39,21 +43,22 @@ func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, e return } -func (c *Cache) initEvictBuffers() { - c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize) - c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize) +func (c *Cache[K, V]) initEvictBuffers() { + c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]V, 0, DefaultEvictedBufferSize) } // onEvicted save evicted key/val and sent in externally registered callback // outside of critical section -func (c *Cache) onEvicted(k, v interface{}) { +func (c *Cache[K, V]) onEvicted(k K, v V) { c.evictedKeys = append(c.evictedKeys, k) c.evictedVals = append(c.evictedVals, v) } // Purge is used to completely clear the cache. -func (c *Cache) Purge() { - var ks, vs []interface{} +func (c *Cache[K, V]) Purge() { + var ks []K + var vs []V c.lock.Lock() c.lru.Purge() if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { @@ -70,8 +75,9 @@ func (c *Cache) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) (evicted bool) { - var k, v interface{} +func (c *Cache[K, V]) Add(key K, value V) (evicted bool) { + var k K + var v V c.lock.Lock() evicted = c.lru.Add(key, value) if c.onEvictedCB != nil && evicted { @@ -86,7 +92,7 @@ func (c *Cache) Add(key, value interface{}) (evicted bool) { } // Get looks up a key's value from the cache. 
-func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { +func (c *Cache[K, V]) Get(key K) (value V, ok bool) { c.lock.Lock() value, ok = c.lru.Get(key) c.lock.Unlock() @@ -95,7 +101,7 @@ func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { // Contains checks if a key is in the cache, without updating the // recent-ness or deleting it for being stale. -func (c *Cache) Contains(key interface{}) bool { +func (c *Cache[K, V]) Contains(key K) bool { c.lock.RLock() containKey := c.lru.Contains(key) c.lock.RUnlock() @@ -104,7 +110,7 @@ func (c *Cache) Contains(key interface{}) bool { // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. -func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { +func (c *Cache[K, V]) Peek(key K) (value V, ok bool) { c.lock.RLock() value, ok = c.lru.Peek(key) c.lock.RUnlock() @@ -114,8 +120,9 @@ func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { // ContainsOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { - var k, v interface{} +func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) { + var k K + var v V c.lock.Lock() if c.lru.Contains(key) { c.lock.Unlock() @@ -136,8 +143,9 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { // PeekOrAdd checks if a key is in the cache without updating the // recent-ness or deleting it for being stale, and if not, adds the value. // Returns whether found and whether an eviction occurred. -func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { - var k, v interface{} +func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) { + var k K + var v V c.lock.Lock() previous, ok = c.lru.Peek(key) if ok { @@ -153,12 +161,13 @@ func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evi if c.onEvictedCB != nil && evicted { c.onEvictedCB(k, v) } - return nil, false, evicted + return } // Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) (present bool) { - var k, v interface{} +func (c *Cache[K, V]) Remove(key K) (present bool) { + var k K + var v V c.lock.Lock() present = c.lru.Remove(key) if c.onEvictedCB != nil && present { @@ -173,8 +182,9 @@ func (c *Cache) Remove(key interface{}) (present bool) { } // Resize changes the cache size. -func (c *Cache) Resize(size int) (evicted int) { - var ks, vs []interface{} +func (c *Cache[K, V]) Resize(size int) (evicted int) { + var ks []K + var vs []V c.lock.Lock() evicted = c.lru.Resize(size) if c.onEvictedCB != nil && evicted > 0 { @@ -191,8 +201,9 @@ func (c *Cache) Resize(size int) (evicted int) { } // RemoveOldest removes the oldest item from the cache. 
-func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { - var k, v interface{} +func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) { + var k K + var v V c.lock.Lock() key, value, ok = c.lru.RemoveOldest() if c.onEvictedCB != nil && ok { @@ -207,7 +218,7 @@ func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { } // GetOldest returns the oldest entry -func (c *Cache) GetOldest() (key, value interface{}, ok bool) { +func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) { c.lock.RLock() key, value, ok = c.lru.GetOldest() c.lock.RUnlock() @@ -215,15 +226,23 @@ func (c *Cache) GetOldest() (key, value interface{}, ok bool) { } // Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { +func (c *Cache[K, V]) Keys() []K { c.lock.RLock() keys := c.lru.Keys() c.lock.RUnlock() return keys } +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *Cache[K, V]) Values() []V { + c.lock.RLock() + values := c.lru.Values() + c.lock.RUnlock() + return values +} + // Len returns the number of items in the cache. -func (c *Cache) Len() int { +func (c *Cache[K, V]) Len() int { c.lock.RLock() length := c.lru.Len() c.lock.RUnlock() diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 0000000000..c4764e6b2f --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go similarity index 50% rename from vendor/github.com/hashicorp/golang-lru/simplelru/lru.go rename to vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go index 9233583c91..f69792388c 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -1,46 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package simplelru import ( - "container/list" "errors" + + "github.com/hashicorp/golang-lru/v2/internal" ) // EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) +type EvictCallback[K comparable, V any] func(key K, value V) // LRU implements a non-thread safe fixed size LRU cache -type LRU struct { +type LRU[K comparable, V any] struct { size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] } // NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { if size <= 0 { return nil, errors.New("must provide a positive size") } - c := &LRU{ + + c := &LRU[K, V]{ size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), onEvict: onEvict, } return c, nil } // Purge is used to completely clear the cache. -func (c *LRU) Purge() { +func (c *LRU[K, V]) Purge() { for k, v := range c.items { if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) + c.onEvict(k, v.Value) } delete(c.items, k) } @@ -48,20 +47,19 @@ func (c *LRU) Purge() { } // Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) (evicted bool) { +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value + ent.Value = value return false } // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry + ent := c.evictList.PushFront(key, value) + c.items[key] = ent - evict := c.evictList.Len() > c.size + evict := c.evictList.Length() > c.size // Verify size not exceeded if evict { c.removeOldest() @@ -70,37 +68,34 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { } // Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) - if ent.Value.(*entry) == nil { - return nil, false - } - return ent.Value.(*entry).value, true + return ent.Value, true } return } // Contains checks if a key is in the cache, without updating the recent-ness // or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { +func (c *LRU[K, V]) Contains(key K) (ok bool) { _, ok = c.items[key] return ok } // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. 
-func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true + return ent.Value, true } - return nil, ok + return } // Remove removes the provided key from the cache, returning if the // key was contained. -func (c *LRU) Remove(key interface{}) (present bool) { +func (c *LRU[K, V]) Remove(key K) (present bool) { if ent, ok := c.items[key]; ok { c.removeElement(ent) return true @@ -109,44 +104,51 @@ func (c *LRU) Remove(key interface{}) (present bool) { } // RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true + return ent.Key, ent.Value, true } - return nil, nil, false + return } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true } - return nil, nil, false + return } // Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) +func (c *LRU[K, V]) Keys() []K { + keys := make([]K, c.evictList.Length()) i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key i++ } return keys } +// Values returns a slice of the values in the cache, from oldest to newest. +func (c *LRU[K, V]) Values() []V { + values := make([]V, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value + i++ + } + return values +} + // Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() +func (c *LRU[K, V]) Len() int { + return c.evictList.Length() } // Resize changes the cache size. -func (c *LRU) Resize(size int) (evicted int) { +func (c *LRU[K, V]) Resize(size int) (evicted int) { diff := c.Len() - size if diff < 0 { diff = 0 @@ -159,19 +161,17 @@ func (c *LRU) Resize(size int) (evicted int) { } // removeOldest removes the oldest item from the cache. 
-func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { +func (c *LRU[K, V]) removeOldest() { + if ent := c.evictList.Back(); ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { +func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) { c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) + delete(c.items, e.Key) if c.onEvict != nil { - c.onEvict(kv.key, kv.value) + c.onEvict(e.Key, e.Value) } } diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go similarity index 68% rename from vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go rename to vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go index cb7f8caf03..043b8bcc3f 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go @@ -1,33 +1,39 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package simplelru provides simple LRU implementation based on build-in container/list. package simplelru // LRUCache is the interface for simple LRU cache. -type LRUCache interface { +type LRUCache[K comparable, V any] interface { // Adds a value to the cache, returns true if an eviction occurred and // updates the "recently used"-ness of the key. - Add(key, value interface{}) bool + Add(key K, value V) bool // Returns key's value from the cache and // updates the "recently used"-ness of the key. #value, isFound - Get(key interface{}) (value interface{}, ok bool) + Get(key K) (value V, ok bool) // Checks if a key exists in cache without updating the recent-ness. - Contains(key interface{}) (ok bool) + Contains(key K) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. - Peek(key interface{}) (value interface{}, ok bool) + Peek(key K) (value V, ok bool) // Removes a key from the cache. - Remove(key interface{}) bool + Remove(key K) bool // Removes the oldest entry from cache. - RemoveOldest() (interface{}, interface{}, bool) + RemoveOldest() (K, V, bool) // Returns the oldest entry from the cache. #key, value, isFound - GetOldest() (interface{}, interface{}, bool) + GetOldest() (K, V, bool) // Returns a slice of the keys in the cache, from oldest to newest. - Keys() []interface{} + Keys() []K + + // Values returns a slice of the values in the cache, from oldest to newest. + Values() []V // Returns the number of items in the cache. 
Len() int diff --git a/vendor/modules.txt b/vendor/modules.txt index d6f9710e0e..2f338006ce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -912,10 +912,11 @@ github.com/hashicorp/go-multierror # github.com/hashicorp/go-uuid v1.0.3 ## explicit github.com/hashicorp/go-uuid -# github.com/hashicorp/golang-lru v1.0.2 -## explicit; go 1.12 -github.com/hashicorp/golang-lru -github.com/hashicorp/golang-lru/simplelru +# github.com/hashicorp/golang-lru/v2 v2.0.7 +## explicit; go 1.18 +github.com/hashicorp/golang-lru/v2 +github.com/hashicorp/golang-lru/v2/internal +github.com/hashicorp/golang-lru/v2/simplelru # github.com/jcmturner/aescts/v2 v2.0.0 ## explicit; go 1.13 github.com/jcmturner/aescts/v2 From 324f29a591e3d49d05b635f7647f49b2e2519387 Mon Sep 17 00:00:00 2001 From: Sotirios Mantziaris Date: Sat, 16 Nov 2024 15:23:43 +0200 Subject: [PATCH 2/2] LRU upgrade --- cache/lru/lru.go | 8 ++++---- cache/lru/lru_test.go | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cache/lru/lru.go b/cache/lru/lru.go index 6daa947b9f..06fc6e7145 100644 --- a/cache/lru/lru.go +++ b/cache/lru/lru.go @@ -29,7 +29,7 @@ func New[k comparable, v any](size int, useCase string) (*Cache[k, v], error) { return nil, err } - return newFunction(chc, lruAttribute, cache.UseCaseAttribute(useCase)) + return newFunction(chc, lruAttribute, cache.UseCaseAttribute(useCase)), nil } // NewWithEvict returns a new LRU cache that can hold 'size' number of keys at a time. @@ -44,17 +44,17 @@ func NewWithEvict[k comparable, v any](size int, useCase string, onEvict func(k, return nil, err } - return newFunction(chc, lruEvictAttribute, cache.UseCaseAttribute(useCase)) + return newFunction(chc, lruEvictAttribute, cache.UseCaseAttribute(useCase)), nil } func newFunction[k comparable, v any](chc *lru.Cache[k, v], typeAttr attribute.KeyValue, useCaseAttr attribute.KeyValue, -) (*Cache[k, v], error) { +) *Cache[k, v] { return &Cache[k, v]{ cache: chc, typeAttribute: typeAttr, useCaseAttribute: useCaseAttr, - }, nil + } } // Get executes a lookup and returns whether a key exists in the cache along with its value. diff --git a/cache/lru/lru_test.go b/cache/lru/lru_test.go index 34e50db4a3..b06fc0c9f1 100644 --- a/cache/lru/lru_test.go +++ b/cache/lru/lru_test.go @@ -46,7 +46,7 @@ func TestNewWithEvict(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { - c, err := NewWithEvict[string, string](tt.size, "test", func(k, v string) {}) + c, err := NewWithEvict[string, string](tt.size, "test", func(_, _ string) {}) if tt.wantErr { assert.Nil(t, c) assert.EqualError(t, err, tt.err) @@ -59,7 +59,7 @@ func TestNewWithEvict(t *testing.T) { } func TestCacheOperations(t *testing.T) { - c, err := NewWithEvict[string, string](10, "test", func(k, v string) {}) + c, err := NewWithEvict[string, string](10, "test", func(_, _ string) {}) assert.NotNil(t, c) require.NoError(t, err) @@ -107,7 +107,7 @@ func TestCacheOperations(t *testing.T) { } func BenchmarkCache(b *testing.B) { - c, err := NewWithEvict[int, int](b.N, "test", func(k, v int) {}) + c, err := NewWithEvict[int, int](b.N, "test", func(_, _ int) {}) require.NoError(b, err) ctx := context.Background()
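
For reference, a minimal sketch of how a consumer would use the generics-based constructors introduced by this patch. It assumes the package path `github.com/beatlabs/patron/cache/lru` (matching `cache/lru/lru.go` above) and an illustrative use-case label `"example"`; the constructor and method signatures are the ones shown in the diff.

```go
package main

import (
	"context"
	"fmt"

	"github.com/beatlabs/patron/cache/lru"
)

func main() {
	ctx := context.Background()

	// Key and value types are now type parameters on the cache.
	c, err := lru.New[string, string](128, "example")
	if err != nil {
		panic(err)
	}

	// NewWithEvict wraps the supplied callback and additionally records an
	// eviction metric for the "example" use case.
	evicting, err := lru.NewWithEvict[string, int](2, "example", func(k string, v int) {
		fmt.Printf("evicted %s=%d\n", k, v)
	})
	if err != nil {
		panic(err)
	}

	_ = c.Set(ctx, "key", "value")
	v, found, _ := c.Get(ctx, "key") // value is returned as interface{}, found reports a cache hit
	fmt.Println(v, found)

	// With capacity 2, the third Set evicts the oldest entry and fires the callback.
	for i := 0; i < 3; i++ {
		_ = evicting.Set(ctx, fmt.Sprintf("k%d", i), i)
	}
}
```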