forked from ethereum-optimism/optimism
Tests: Batching Benchmarks (ethereum-optimism#9927)
* Add Benchmark for AddSingularBatch
* update compressor configs; address PR comments
* Add b.N
* Export RandomSingularBatch through batch_test_util.go
* measure only the final batch; other organizational improvements
* Add Benchmark for ToRawSpanBatch
* update tests
* minor fixup
* Add Benchmark for adding *All* Span Batches
* comment fixups
* narrow tests to only test span batches that won't exceed RLP limit
* address pr comments
1 parent 72ca57e · commit b250918
Showing 3 changed files with 265 additions and 22 deletions.
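The measurement approach called out in the commit message ("measure only the final batch") relies on testing.B's StopTimer/StartTimer, as seen in BenchmarkFinalBatchChannelOut below. The following is a distilled, self-contained sketch of that pattern only; the function name and the bytes.Buffer workload are illustrative and are not code from this commit.

package benchmarks

import (
	"bytes"
	"testing"
)

// BenchmarkFinalWriteSketch (illustrative only): all setup runs with the timer
// stopped, and only the final operation is timed, mirroring how the benchmarks
// below isolate the last AddSingularBatch call.
func BenchmarkFinalWriteSketch(b *testing.B) {
	payload := make([]byte, 1024)
	for bn := 0; bn < b.N; bn++ {
		b.StopTimer() // exclude setup from the measurement
		var buf bytes.Buffer
		for i := 0; i < 999; i++ {
			buf.Write(payload)
		}
		b.StartTimer() // measure only the final write
		buf.Write(payload)
	}
}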
@@ -0,0 +1,232 @@
package benchmarks

import (
	"fmt"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/ethereum-optimism/optimism/op-batcher/compressor"
	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
	"github.com/stretchr/testify/require"
)

var (

	// compressors used in the benchmark
	rc, _ = compressor.NewRatioCompressor(compressor.Config{
		TargetOutputSize: 100_000_000_000,
		ApproxComprRatio: 0.4,
	})
	sc, _ = compressor.NewShadowCompressor(compressor.Config{
		TargetOutputSize: 100_000_000_000,
	})
	nc, _ = compressor.NewNonCompressor(compressor.Config{
		TargetOutputSize: 100_000_000_000,
	})

	compressors = map[string]derive.Compressor{
		"NonCompressor":    nc,
		"RatioCompressor":  rc,
		"ShadowCompressor": sc,
	}

	// batch types used in the benchmark
	batchTypes = []uint{
		derive.SpanBatchType,
		// uncomment to include singular batches in the benchmark
		// singular batches are not included by default because they are not the target of the benchmark
		//derive.SingularBatchType,
	}
)

// a test case for the benchmark controls the number of batches and transactions per batch,
// as well as the batch type and compressor used
type BatchingBenchmarkTC struct {
	BatchType  uint
	BatchCount int
	txPerBatch int
	compKey    string
}

func (t BatchingBenchmarkTC) String() string {
	var btype string
	if t.BatchType == derive.SingularBatchType {
		btype = "Singular"
	}
	if t.BatchType == derive.SpanBatchType {
		btype = "Span"
	}
	return fmt.Sprintf("BatchType=%s, txPerBatch=%d, BatchCount=%d, Compressor=%s", btype, t.txPerBatch, t.BatchCount, t.compKey)
}

// BenchmarkFinalBatchChannelOut benchmarks the performance of adding singular batches to a channel out.
// This exercises the compression and batching logic, as well as any batch-building logic.
// Every Compressor in the compressor map is benchmarked for each test case.
// The results of the benchmark measure *only* the time to add the final batch to the channel out,
// not the time to send all the batches through the channel out.
// Hint: raise derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits when adding larger test cases.
func BenchmarkFinalBatchChannelOut(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},

		{10, 100},
		{100, 100},
	}

	// build a set of test cases for each batch type, compressor, and target-pair
	tests := []BatchingBenchmarkTC{}
	for _, bt := range batchTypes {
		for compkey := range compressors {
			for _, t := range targets {
				tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey})
			}
		}
	}

	for _, tc := range tests {
		chainID := big.NewInt(333)
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		t := time.Now()
		for i := 0; i < tc.BatchCount; i++ {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			// set the timestamp to increase with each batch
			// to leverage optimizations in the Batch Linked List
			batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			// reset the compressor used in the test case
			for bn := 0; bn < b.N; bn++ {
				// don't measure the setup time
				b.StopTimer()
				compressors[tc.compKey].Reset()
				spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
				cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder)
				// add all but the final batch to the channel out
				for i := 0; i < tc.BatchCount-1; i++ {
					_, err := cout.AddSingularBatch(batches[i], 0)
					require.NoError(b, err)
				}
				// measure the time to add the final batch
				b.StartTimer()
				// add the final batch to the channel out
				_, err := cout.AddSingularBatch(batches[tc.BatchCount-1], 0)
				require.NoError(b, err)
			}
		})
	}
}

// BenchmarkAllBatchesChannelOut benchmarks the performance of adding singular batches to a channel out.
// This exercises the compression and batching logic, as well as any batch-building logic.
// Every Compressor in the compressor map is benchmarked for each test case.
// The results of the benchmark measure the time to add *all batches* to the channel out,
// not the time to send all the batches through the channel out.
// Hint: raise derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits.
func BenchmarkAllBatchesChannelOut(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},

		{10, 100},
		{100, 100},
	}

	// build a set of test cases for each batch type, compressor, and target-pair
	tests := []BatchingBenchmarkTC{}
	for _, bt := range batchTypes {
		for compkey := range compressors {
			for _, t := range targets {
				tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey})
			}
		}
	}

	for _, tc := range tests {
		chainID := big.NewInt(333)
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		t := time.Now()
		for i := 0; i < tc.BatchCount; i++ {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			// set the timestamp to increase with each batch
			// to leverage optimizations in the Batch Linked List
			batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			// reset the compressor used in the test case
			for bn := 0; bn < b.N; bn++ {
				// don't measure the setup time
				b.StopTimer()
				compressors[tc.compKey].Reset()
				spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
				cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder)
				b.StartTimer()
				// add all batches to the channel out
				for i := 0; i < tc.BatchCount; i++ {
					_, err := cout.AddSingularBatch(batches[i], 0)
					require.NoError(b, err)
				}
			}
		})
	}
}

// BenchmarkGetRawSpanBatch benchmarks the performance of building a span batch from singular batches.
// This exercises the span batch building logic directly.
// Adding batches to the span batch builder is not included in the measurement; only the final build to RawSpanBatch is timed.
func BenchmarkGetRawSpanBatch(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},
		{10000, 1},

		{10, 100},
		{100, 100},
		{1000, 100},
	}

	tests := []BatchingBenchmarkTC{}
	for _, t := range targets {
		tests = append(tests, BatchingBenchmarkTC{derive.SpanBatchType, t.bs, t.tpb, "NonCompressor"})
	}

	for _, tc := range tests {
		chainID := big.NewInt(333)
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		t := time.Now()
		for i := 0; i < tc.BatchCount; i++ {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			for bn := 0; bn < b.N; bn++ {
				// don't measure the setup time
				b.StopTimer()
				spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
				for i := 0; i < tc.BatchCount; i++ {
					spanBatchBuilder.AppendSingularBatch(batches[i], 0)
				}
				b.StartTimer()
				_, err := spanBatchBuilder.GetRawSpanBatch()
				require.NoError(b, err)
			}
		})
	}
}
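These benchmarks run with the standard Go tooling. The package path of the new benchmarks package is not shown in this diff, so the module-wide pattern below is an assumption; the flags themselves are stock `go test` options:

go test -run '^$' -bench 'ChannelOut|GetRawSpanBatch' -benchmem ./...

Here `-run '^$'` skips regular tests, `-bench` selects benchmark functions by regular expression, and `-benchmem` adds allocation statistics. Because setup runs with the timer stopped, the reported ns/op reflects only the timed region of each benchmark.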
@@ -0,0 +1,33 @@
package derive

import (
	"math/big"
	"math/rand"

	"github.com/ethereum-optimism/optimism/op-node/rollup"
	"github.com/ethereum-optimism/optimism/op-service/testutils"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/types"
)

// RandomSingularBatch returns a SingularBatch populated with randomly generated
// transactions and header fields, for use in tests and benchmarks.
func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *SingularBatch {
	signer := types.NewLondonSigner(chainID)
	baseFee := big.NewInt(rng.Int63n(300_000_000_000))
	txsEncoded := make([]hexutil.Bytes, 0, txCount)
	// force each tx to have equal chainID
	for i := 0; i < txCount; i++ {
		tx := testutils.RandomTx(rng, baseFee, signer)
		txEncoded, err := tx.MarshalBinary()
		if err != nil {
			panic("tx MarshalBinary: " + err.Error())
		}
		txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded))
	}
	return &SingularBatch{
		ParentHash:   testutils.RandomHash(rng),
		EpochNum:     rollup.Epoch(1 + rng.Int63n(100_000_000)),
		EpochHash:    testutils.RandomHash(rng),
		Timestamp:    uint64(rng.Int63n(2_000_000_000)),
		Transactions: txsEncoded,
	}
}
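A note on file placement (an inference, not stated in the diff): Go files whose names end in _test.go are compiled only into their own package's test binary and cannot be imported elsewhere. Keeping RandomSingularBatch in batch_test_util.go, a regular (non-_test) file in the derive package, is what allows the separate benchmarks package to import and call it, mirroring the call in the benchmark file above:

	batch := derive.RandomSingularBatch(rng, txPerBatch, chainID)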