test(share/eds/cache): benchmark eds cache eviction performance (celestiaorg#2778)

Adds benchmarks to measure cache performance under overload.
1. Measures the time it takes to fully load an accessor from disk into the cache.
2. Measures the time it takes for multiple parallel IPLD readers (clients)
to sample an EDS directly from the store when there are not enough cache slots.
Results show it takes ~12s for 10 parallel readers to sample.
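For reference, each benchmark can be run in isolation with standard Go tooling, for example: go test -run='^$' -bench=BenchmarkCacheEviction ./share/eds/ or go test -run='^$' -bench=BenchmarkIPLDGetterOverBusyCache ./share/getters/ (the package paths match the changed files below).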
walldiss authored Oct 2, 2023
1 parent 849cb67 commit bb9658a
Showing 2 changed files with 129 additions and 0 deletions.
58 changes: 58 additions & 0 deletions share/eds/store_test.go
@@ -18,11 +18,13 @@ import (
"github.com/stretchr/testify/require"

"github.com/celestiaorg/celestia-app/pkg/da"
dsbadger "github.com/celestiaorg/go-ds-badger4"
"github.com/celestiaorg/rsmt2d"

"github.com/celestiaorg/celestia-node/share"
"github.com/celestiaorg/celestia-node/share/eds/cache"
"github.com/celestiaorg/celestia-node/share/eds/edstest"
"github.com/celestiaorg/celestia-node/share/ipld"
)

func TestEDSStore(t *testing.T) {
@@ -465,6 +467,62 @@ func BenchmarkStore(b *testing.B) {
})
}

// BenchmarkCacheEviction benchmarks the time it takes to load a block into the cache when the
// cache size is set to 1. This forces a cache eviction on every read.
// BenchmarkCacheEviction-10/128 384 3533586 ns/op (~3ms)
func BenchmarkCacheEviction(b *testing.B) {
const (
blocks = 4
size = 128
)

ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
b.Cleanup(cancel)

dir := b.TempDir()
ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions)
require.NoError(b, err)

newStore := func(params *Parameters) *Store {
edsStore, err := NewStore(params, dir, ds)
require.NoError(b, err)
err = edsStore.Start(ctx)
require.NoError(b, err)
return edsStore
}
edsStore := newStore(DefaultParameters())

// generate EDSs and store them
cids := make([]cid.Cid, blocks)
for i := range cids {
eds := edstest.RandEDS(b, size)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(b, err)
err = edsStore.Put(ctx, dah.Hash(), eds)
require.NoError(b, err)

// store cids for read loop later
cids[i] = ipld.MustCidFromNamespacedSha256(dah.RowRoots[0])
}

// restart store to clear cache
require.NoError(b, edsStore.Stop(ctx))

// set BlockstoreCacheSize to 1 to force eviction on every read
params := DefaultParameters()
params.BlockstoreCacheSize = 1
bstore := newStore(params).Blockstore()

// start benchmark
b.ResetTimer()
for i := 0; i < b.N; i++ {
h := cids[i%blocks]
// every read will trigger eviction
_, err := bstore.Get(ctx, h)
require.NoError(b, err)
}
}

func newStore(t *testing.T) (*Store, error) {
t.Helper()

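As an aside, the eviction behavior the loop above exercises can be illustrated with a minimal, self-contained sketch (not part of the commit; the lru1 type here is hypothetical): with a single cache slot, reads that alternate between more than one key miss every time, just as cids[i%blocks] does when BlockstoreCacheSize is 1.

package main

import "fmt"

// lru1 is a hypothetical single-slot cache: storing any new key
// evicts whatever was cached before, mirroring BlockstoreCacheSize = 1.
type lru1 struct {
	key string
	val int
	ok  bool
}

func (c *lru1) get(k string) (int, bool) {
	if c.ok && c.key == k {
		return c.val, true
	}
	return 0, false
}

func (c *lru1) put(k string, v int) { c.key, c.val, c.ok = k, v, true }

func main() {
	c := &lru1{}
	misses := 0
	// Alternating keys, like cids[i%blocks] with blocks > 1.
	for i, k := range []string{"a", "b", "a", "b"} {
		if _, hit := c.get(k); !hit {
			misses++
			c.put(k, i) // this put evicts the previous entry
		}
	}
	fmt.Println("misses:", misses) // prints "misses: 4": every read misses
}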
71 changes: 71 additions & 0 deletions share/getters/getter_test.go
@@ -3,6 +3,7 @@ package getters
import (
"context"
"os"
"sync"
"testing"
"time"

@@ -12,7 +13,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/celestiaorg/celestia-app/pkg/da"
"github.com/celestiaorg/celestia-app/pkg/wrapper"
dsbadger "github.com/celestiaorg/go-ds-badger4"
"github.com/celestiaorg/rsmt2d"

"github.com/celestiaorg/celestia-node/share"
@@ -217,6 +220,74 @@ func TestIPLDGetter(t *testing.T) {
})
}

// BenchmarkIPLDGetterOverBusyCache benchmarks the performance of the IPLDGetter when the
// cache size of the underlying blockstore is less than the number of blocks being requested in
// parallel. This is to ensure performance doesn't degrade when the cache is being frequently
// evicted.
// BenchmarkIPLDGetterOverBusyCache-10/128 1 12460428417 ns/op (~12s)
func BenchmarkIPLDGetterOverBusyCache(b *testing.B) {
const (
blocks = 10
size = 128
)

ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
b.Cleanup(cancel)

dir := b.TempDir()
ds, err := dsbadger.NewDatastore(dir, &dsbadger.DefaultOptions)
require.NoError(b, err)

newStore := func(params *eds.Parameters) *eds.Store {
edsStore, err := eds.NewStore(params, dir, ds)
require.NoError(b, err)
err = edsStore.Start(ctx)
require.NoError(b, err)
return edsStore
}
edsStore := newStore(eds.DefaultParameters())

// generate EDSs and store them
hashes := make([]da.DataAvailabilityHeader, blocks)
for i := range hashes {
eds := edstest.RandEDS(b, size)
dah, err := da.NewDataAvailabilityHeader(eds)
require.NoError(b, err)
err = edsStore.Put(ctx, dah.Hash(), eds)
require.NoError(b, err)

// store DAHs for read loop later
hashes[i] = dah
}

// restart store to clear cache
require.NoError(b, edsStore.Stop(ctx))

// set BlockstoreCacheSize to 1 to force eviction on every read
params := eds.DefaultParameters()
params.BlockstoreCacheSize = 1
edsStore = newStore(params)
bstore := edsStore.Blockstore()
bserv := ipld.NewBlockservice(bstore, offline.Exchange(bstore))

// start client
getter := NewIPLDGetter(bserv)

// request blocks in parallel
b.ResetTimer()
g := sync.WaitGroup{}
g.Add(blocks)
for _, h := range hashes {
h := h
go func() {
defer g.Done()
_, err := getter.GetEDS(ctx, &h)
require.NoError(b, err)
}()
}
g.Wait()
}

func randomEDS(t *testing.T) (*rsmt2d.ExtendedDataSquare, *share.Root) {
eds := edstest.RandEDS(t, 4)
dah, err := share.NewRoot(eds)
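One caveat with the goroutine pattern in BenchmarkIPLDGetterOverBusyCache: require.NoError inside a goroutine calls FailNow off the test goroutine, which testify advises against. A hedged alternative sketch, assuming golang.org/x/sync/errgroup is available, collects the errors and asserts once on the main goroutine:

// Sketch of an errgroup-based variant of the parallel read loop.
// import "golang.org/x/sync/errgroup"
g, gctx := errgroup.WithContext(ctx)
for _, h := range hashes {
	h := h
	g.Go(func() error {
		// return the error instead of asserting inside the goroutine
		_, err := getter.GetEDS(gctx, &h)
		return err
	})
}
require.NoError(b, g.Wait())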
