From 61c8c5216f77963067e489b1236671297c240d6f Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Sun, 25 Feb 2024 12:03:31 -0800 Subject: [PATCH 1/2] Avoid freq switch of non-4844 to 4844 batch post Logic to prevent switching from non-4844 batches to 4844 batches too often, so that blocks can be filled efficiently. The geth txpool rejects txs for accounts that already have the other type of txs in the pool with "address already reserved". This logic makes sure that, if there is a backlog, enough non-4844 batches have been posted to fill a block before switching. --- arbnode/batch_poster.go | 80 +++++++++++++++++++++++++++++++++--- arbnode/batch_poster_test.go | 70 +++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 6 deletions(-) create mode 100644 arbnode/batch_poster_test.go diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index e09775ea44..40d51e0192 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -91,6 +91,7 @@ type BatchPoster struct { dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] + batch4844History *BoolRing // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. 
@@ -310,6 +311,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e bridgeAddr: opts.DeployInfo.Bridge, daWriter: opts.DAWriter, redisLock: redisLock, + batch4844History: NewBoolRing(16), } b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20) if err != nil { @@ -967,12 +969,23 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if config.IgnoreBlobPrice { use4844 = true } else { - blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + backlog := atomic.LoadUint64(&b.backlog) + // Logic to prevent switching from non-4844 batches to 4844 batches too often, + // so that blocks can be filled efficiently. The geth txpool rejects txs for + // accounts that already have the other type of txs in the pool with + // "address already reserved". This logic makes sure that, if there is a backlog, + // that enough non-4844 batches have been posted to fill a block before switching. 
+ if backlog == 0 || + b.batch4844History.Empty() || + b.batch4844History.Peek() || + b.batch4844History.All(false) { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } } } } @@ -1208,9 +1221,11 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "totalSegments", len(b.building.segments.rawSegments), "numBlobs", len(kzgBlobs), ) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount b.messagesPerBatch.Update(uint64(postedMessages)) + b.batch4844History.Update(b.building.use4844) unpostedMessages := msgCount - b.building.msgCount messagesPerBatch := b.messagesPerBatch.Average() if messagesPerBatch == 0 { @@ -1350,3 +1365,56 @@ func (b *BatchPoster) StopAndWait() { b.dataPoster.StopAndWait() b.redisLock.StopAndWait() } + +type BoolRing struct { + buffer []bool + bufferPosition int +} + +func NewBoolRing(size int) *BoolRing { + return &BoolRing{ + buffer: make([]bool, 0, size), + } +} + +func (b *BoolRing) Update(value bool) { + period := cap(b.buffer) + if period == 0 { + return + } + if len(b.buffer) < period { + b.buffer = append(b.buffer, value) + } else { + b.buffer[b.bufferPosition] = value + } + b.bufferPosition = (b.bufferPosition + 1) % period +} + +func (b *BoolRing) Empty() bool { + return len(b.buffer) == 0 +} + +// Peek returns the most recently inserted value. 
+// Assumes not empty, check Empty() first +func (b *BoolRing) Peek() bool { + lastPosition := b.bufferPosition - 1 + if lastPosition < 0 { + // This is the case where we have wrapped around, since Peek() shouldn't + // be called without checking Empty(), so we can just use capacity. + lastPosition = cap(b.buffer) - 1 + } + return b.buffer[lastPosition] +} + +// All returns true if the BoolRing is full and all values equal value. +func (b *BoolRing) All(value bool) bool { + if len(b.buffer) < cap(b.buffer) { + return false + } + for _, v := range b.buffer { + if v != value { + return false + } + } + return true +} diff --git a/arbnode/batch_poster_test.go b/arbnode/batch_poster_test.go new file mode 100644 index 0000000000..15c9ae0c05 --- /dev/null +++ b/arbnode/batch_poster_test.go @@ -0,0 +1,70 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbnode + +import "testing" + +func TestBoolRing(t *testing.T) { + b := NewBoolRing(3) + if !b.Empty() { + Fail(t, "not empty as expected") + } + + b.Update(true) + if !b.Peek() { + Fail(t, "Peek returned wrong value") + } + if b.All(true) { + Fail(t, "All shouldn't be true if buffer is not full") + } + + b.Update(true) + if !b.Peek() { + Fail(t, "Peek returned wrong value") + } + if b.All(true) { + Fail(t, "All shouldn't be true if buffer is not full") + } + + b.Update(true) + if !b.Peek() { + Fail(t, "Peek returned wrong value") + } + if !b.All(true) { + Fail(t, "Buffer was full of true, All(true) should be true") + } + + b.Update(false) + if b.Peek() { + Fail(t, "Peek returned wrong value") + } + if b.All(true) { + Fail(t, "Buffer is not full of true") + } + + b.Update(true) + if !b.Peek() { + Fail(t, "Peek returned wrong value") + } + if b.All(true) { + Fail(t, "Buffer is not full of true") + } + + b.Update(true) + if !b.Peek() { + Fail(t, "Peek returned wrong value") + } + if b.All(true) { + Fail(t, "Buffer is not full of true") + } + + 
b.Update(true) + if !b.Peek() { + Fail(t, "Peek returned wrong value") + } + if !b.All(true) { + Fail(t, "Buffer was full of true, All(true) should be true") + } + +} From 610d0be5ed9ac5438ebc82c46ee8bc71502051e5 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 28 Feb 2024 13:25:51 -0800 Subject: [PATCH 2/2] Simplify impl of batch type switching logic The ring buffer was not necessary, a simple counter will suffice to implement the same logic. --- arbnode/batch_poster.go | 14 ++++---- arbnode/batch_poster_test.go | 70 ------------------------------------ 2 files changed, 8 insertions(+), 76 deletions(-) delete mode 100644 arbnode/batch_poster_test.go diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 40d51e0192..bb5b05d1e4 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -91,7 +91,7 @@ type BatchPoster struct { dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] - batch4844History *BoolRing + non4844BatchCount int // Count of consecutive non-4844 batches posted // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. @@ -311,7 +311,6 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e bridgeAddr: opts.DeployInfo.Bridge, daWriter: opts.DAWriter, redisLock: redisLock, - batch4844History: NewBoolRing(16), } b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20) if err != nil { @@ -976,9 +975,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // "address already reserved". This logic makes sure that, if there is a backlog, // that enough non-4844 batches have been posted to fill a block before switching. 
if backlog == 0 || - b.batch4844History.Empty() || - b.batch4844History.Peek() || - b.batch4844History.All(false) { + b.non4844BatchCount == 0 || + b.non4844BatchCount > 16 { blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) @@ -1225,7 +1223,11 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount b.messagesPerBatch.Update(uint64(postedMessages)) - b.batch4844History.Update(b.building.use4844) + if b.building.use4844 { + b.non4844BatchCount = 0 + } else { + b.non4844BatchCount++ + } unpostedMessages := msgCount - b.building.msgCount messagesPerBatch := b.messagesPerBatch.Average() if messagesPerBatch == 0 { diff --git a/arbnode/batch_poster_test.go b/arbnode/batch_poster_test.go deleted file mode 100644 index 15c9ae0c05..0000000000 --- a/arbnode/batch_poster_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2024, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package arbnode - -import "testing" - -func TestBoolRing(t *testing.T) { - b := NewBoolRing(3) - if !b.Empty() { - Fail(t, "not empty as expected") - } - - b.Update(true) - if !b.Peek() { - Fail(t, "Peek returned wrong value") - } - if b.All(true) { - Fail(t, "All shouldn't be true if buffer is not full") - } - - b.Update(true) - if !b.Peek() { - Fail(t, "Peek returned wrong value") - } - if b.All(true) { - Fail(t, "All shouldn't be true if buffer is not full") - } - - b.Update(true) - if !b.Peek() { - Fail(t, "Peek returned wrong value") - } - if !b.All(true) { - Fail(t, "Buffer was full of true, All(true) should be true") - } - - b.Update(false) - if b.Peek() { - Fail(t, "Peek returned wrong value") - } - if b.All(true) { - Fail(t, "Buffer is not full of true") - } - - b.Update(true) - if !b.Peek() { - Fail(t, "Peek returned wrong value") - } - if b.All(true) { - Fail(t, "Buffer is not full of true") - } - - b.Update(true) - if !b.Peek() { - Fail(t, "Peek returned wrong value") - } - if b.All(true) { - Fail(t, "Buffer is not full of true") - } - - b.Update(true) - if !b.Peek() { - Fail(t, "Peek returned wrong value") - } - if !b.All(true) { - Fail(t, "Buffer was full of true, All(true) should be true") - } - -}