From 8b86e0e5016f01323ab870fa10971154d242965e Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 11 Jan 2024 16:17:37 +0000 Subject: [PATCH 001/103] fix starting second node in pruning system test, add more checks --- system_tests/pruning_test.go | 96 +++++++++++++++++++++++------------- 1 file changed, 62 insertions(+), 34 deletions(-) diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go index ef82c0466e..bdb6be6a3c 100644 --- a/system_tests/pruning_test.go +++ b/system_tests/pruning_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/pruning" "github.com/offchainlabs/nitro/execution/gethexec" @@ -32,35 +33,34 @@ func TestPruning(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - var dataDir string - - func() { - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - _ = builder.Build(t) - dataDir = builder.dataDir - l2cleanupDone := false - defer func() { - if !l2cleanupDone { - builder.L2.cleanup() - } - builder.L1.cleanup() - }() - builder.L2Info.GenerateAccount("User2") - var txs []*types.Transaction - for i := uint64(0); i < 200; i++ { - tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) - txs = append(txs, tx) - err := builder.L2.Client.SendTransaction(ctx, tx) - Require(t, err) - } - for _, tx := range txs { - _, err := builder.L2.EnsureTxSucceeded(tx) - Require(t, err) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + _ = builder.Build(t) + l2cleanupDone := false + defer func() { + if !l2cleanupDone { + builder.L2.cleanup() } - l2cleanupDone = true - builder.L2.cleanup() - t.Log("stopped l2 node") + builder.L1.cleanup() + }() + builder.L2Info.GenerateAccount("User2") + var txs []*types.Transaction + for i := uint64(0); i < 200; i++ { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + } + for _, tx := range txs { + _, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + } + lastBlock, err := builder.L2.Client.BlockNumber(ctx) + Require(t, err) + l2cleanupDone = true + builder.L2.cleanup() + t.Log("stopped l2 node") + func() { stack, err := node.New(builder.l2StackConfig) Require(t, err) defer stack.Close() @@ -105,15 +105,43 @@ func TestPruning(t *testing.T) { Fatal(t, "The db doesn't have less entries after pruning then before. 
Before:", chainDbEntriesBeforePruning, "After:", chainDbEntriesAfterPruning) } }() - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - builder.dataDir = dataDir - cancel = builder.Build(t) - defer cancel() - builder.L2Info.GenerateAccount("User2") + testClient, cleanup := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: builder.l2StackConfig}) + defer cleanup() + + currentBlock := uint64(0) + // wait for the chain to catch up + for currentBlock < lastBlock { + currentBlock, err = testClient.Client.BlockNumber(ctx) + Require(t, err) + } + + currentBlock, err = testClient.Client.BlockNumber(ctx) + Require(t, err) + bc := testClient.ExecNode.Backend.ArbInterface().BlockChain() + triedb := bc.StateCache().TrieDB() + var start uint64 + if currentBlock+1 >= builder.execConfig.Caching.BlockCount { + start = currentBlock + 1 - builder.execConfig.Caching.BlockCount + } else { + start = 0 + } + for i := start; i <= currentBlock; i++ { + header := bc.GetHeaderByNumber(i) + _, err := bc.StateAt(header.Root) + Require(t, err) + tr, err := trie.New(trie.TrieID(header.Root), triedb) + Require(t, err) + it, err := tr.NodeIterator(nil) + Require(t, err) + for it.Next(true) { + } + Require(t, it.Error()) + } + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) - err := builder.L2.Client.SendTransaction(ctx, tx) + err = testClient.Client.SendTransaction(ctx, tx) Require(t, err) - _, err = builder.L2.EnsureTxSucceeded(tx) + _, err = testClient.EnsureTxSucceeded(tx) Require(t, err) } From 3424519b0521a7e47d5b06ce06e3d4eb64c5bcd9 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Sat, 13 Jan 2024 00:36:58 +0000 Subject: [PATCH 002/103] add delay while polling block number in pruning test --- system_tests/pruning_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/system_tests/pruning_test.go b/system_tests/pruning_test.go index bdb6be6a3c..e9e99dffcc 100644 --- a/system_tests/pruning_test.go +++ b/system_tests/pruning_test.go @@ -3,6 +3,7 @@ package arbtest import ( "context" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -114,6 +115,7 @@ func TestPruning(t *testing.T) { for currentBlock < lastBlock { currentBlock, err = testClient.Client.BlockNumber(ctx) Require(t, err) + time.Sleep(20 * time.Millisecond) } currentBlock, err = testClient.Client.BlockNumber(ctx) From 7876f5b7adf5aeae463899953c3b6d579915a976 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Wed, 17 Jan 2024 17:57:56 +0530 Subject: [PATCH 003/103] Add an option for the inbox reader to only read safe or finalized L1 blocks --- arbnode/inbox_reader.go | 102 ++++++++++++++++++++++++++++------------ arbnode/node.go | 7 +++ 2 files changed, 80 insertions(+), 29 deletions(-) diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 9c830e3c89..f452b0d890 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -31,6 +31,7 @@ type InboxReaderConfig struct { DefaultBlocksToRead uint64 `koanf:"default-blocks-to-read" reload:"hot"` TargetMessagesRead uint64 `koanf:"target-messages-read" reload:"hot"` MaxBlocksToRead uint64 `koanf:"max-blocks-to-read" reload:"hot"` + ReadMode string `koanf:"read-mode" reload:"hot"` } type InboxReaderConfigFetcher func() *InboxReaderConfig @@ -39,6 +40,12 @@ func (c *InboxReaderConfig) Validate() error { if c.MaxBlocksToRead == 0 || c.MaxBlocksToRead < c.DefaultBlocksToRead { return errors.New("inbox reader max-blocks-to-read cannot be zero or less than 
default-blocks-to-read") } + if c.ReadMode != "latest" { + c.ReadMode = strings.ToLower(c.ReadMode) + if c.ReadMode != "safe" && c.ReadMode != "finalized" { + return fmt.Errorf("inbox reader read-mode is invalid, want: safe or finalized, got: %s", c.ReadMode) + } + } return nil } @@ -50,6 +57,7 @@ func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".default-blocks-to-read", DefaultInboxReaderConfig.DefaultBlocksToRead, "the default number of blocks to read at once (will vary based on traffic by default)") f.Uint64(prefix+".target-messages-read", DefaultInboxReaderConfig.TargetMessagesRead, "if adjust-blocks-to-read is enabled, the target number of messages to read at once") f.Uint64(prefix+".max-blocks-to-read", DefaultInboxReaderConfig.MaxBlocksToRead, "if adjust-blocks-to-read is enabled, the maximum number of blocks to read at once") + f.String(prefix+".read-mode", DefaultInboxReaderConfig.ReadMode, "mode to only read safe or finalized L1 blocks. Takes string input, valid strings- safe, finalized") } var DefaultInboxReaderConfig = InboxReaderConfig{ @@ -60,6 +68,7 @@ var DefaultInboxReaderConfig = InboxReaderConfig{ DefaultBlocksToRead: 100, TargetMessagesRead: 500, MaxBlocksToRead: 2000, + ReadMode: "latest", } var TestInboxReaderConfig = InboxReaderConfig{ @@ -70,6 +79,7 @@ var TestInboxReaderConfig = InboxReaderConfig{ DefaultBlocksToRead: 100, TargetMessagesRead: 500, MaxBlocksToRead: 2000, + ReadMode: "latest", } type InboxReader struct { @@ -218,6 +228,7 @@ func (r *InboxReader) CaughtUp() chan struct{} { } func (r *InboxReader) run(ctx context.Context, hadError bool) error { + readMode := r.config().ReadMode from, err := r.getNextBlockToRead() if err != nil { return err @@ -238,38 +249,71 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } defer storeSeenBatchCount() // in case of error for { - - latestHeader, err := r.l1Reader.LastHeader(ctx) - if err != nil { - return err - } config := r.config() - currentHeight := latestHeader.Number - - neededBlockAdvance := config.DelayBlocks + arbmath.SaturatingUSub(config.MinBlocksToRead, 1) - neededBlockHeight := arbmath.BigAddByUint(from, neededBlockAdvance) - checkDelayTimer := time.NewTimer(config.CheckDelay) - WaitForHeight: - for arbmath.BigLessThan(currentHeight, neededBlockHeight) { - select { - case latestHeader = <-newHeaders: - if latestHeader == nil { - // shutting down + currentHeight := big.NewInt(0) + if readMode != "latest" { + var blockNum uint64 + fetchLatestSafeOrFinalized := func() { + if readMode == "safe" { + blockNum, err = r.l1Reader.LatestSafeBlockNr(ctx) + } else { + blockNum, err = r.l1Reader.LatestFinalizedBlockNr(ctx) + } + } + fetchLatestSafeOrFinalized() + if err != nil || blockNum == 0 { + return fmt.Errorf("inboxreader running in read only %s mode and unable to fetch latest %s block. err: %w", readMode, readMode, err) + } + currentHeight.SetUint64(blockNum) + // latest block in our db is newer than the latest safe/finalized block hence reset 'from' to match the last safe/finalized block number + if from.Uint64() > currentHeight.Uint64()+1 { + from.Set(currentHeight) + } + for currentHeight.Cmp(from) <= 0 { + select { + case <-newHeaders: + fetchLatestSafeOrFinalized() + if err != nil || blockNum == 0 { + return fmt.Errorf("inboxreader waiting for recent %s block and unable to fetch its block number. 
err: %w", readMode, err) + } + currentHeight.SetUint64(blockNum) + case <-ctx.Done(): return nil } - currentHeight = new(big.Int).Set(latestHeader.Number) - case <-ctx.Done(): - return nil - case <-checkDelayTimer.C: - break WaitForHeight } - } - checkDelayTimer.Stop() + } else { + + latestHeader, err := r.l1Reader.LastHeader(ctx) + if err != nil { + return err + } + currentHeight = latestHeader.Number + + neededBlockAdvance := config.DelayBlocks + arbmath.SaturatingUSub(config.MinBlocksToRead, 1) + neededBlockHeight := arbmath.BigAddByUint(from, neededBlockAdvance) + checkDelayTimer := time.NewTimer(config.CheckDelay) + WaitForHeight: + for arbmath.BigLessThan(currentHeight, neededBlockHeight) { + select { + case latestHeader = <-newHeaders: + if latestHeader == nil { + // shutting down + return nil + } + currentHeight = new(big.Int).Set(latestHeader.Number) + case <-ctx.Done(): + return nil + case <-checkDelayTimer.C: + break WaitForHeight + } + } + checkDelayTimer.Stop() - if config.DelayBlocks > 0 { - currentHeight = new(big.Int).Sub(currentHeight, new(big.Int).SetUint64(config.DelayBlocks)) - if currentHeight.Cmp(r.firstMessageBlock) < 0 { - currentHeight = new(big.Int).Set(r.firstMessageBlock) + if config.DelayBlocks > 0 { + currentHeight = new(big.Int).Sub(currentHeight, new(big.Int).SetUint64(config.DelayBlocks)) + if currentHeight.Cmp(r.firstMessageBlock) < 0 { + currentHeight = new(big.Int).Set(r.firstMessageBlock) + } } } @@ -358,7 +402,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { r.lastReadBatchCount = checkingBatchCount r.lastReadMutex.Unlock() storeSeenBatchCount() - if !r.caughtUp { + if !r.caughtUp && readMode == "latest" { r.caughtUp = true close(r.caughtUpChan) } @@ -406,7 +450,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { if err != nil { return err } - if !r.caughtUp && to.Cmp(currentHeight) == 0 { + if !r.caughtUp && to.Cmp(currentHeight) == 0 && readMode == "latest" { r.caughtUp = true close(r.caughtUpChan) } diff --git a/arbnode/node.go b/arbnode/node.go index f92dcefe7c..55bd69118d 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -99,6 +99,13 @@ func (c *Config) Validate() error { if c.DelayedSequencer.Enable && !c.Sequencer { return errors.New("cannot enable delayed sequencer without enabling sequencer") } + if c.InboxReader.ReadMode != "latest" { + if c.Sequencer { + return errors.New("cannot enable inboxreader in safe or finalized mode along with sequencer") + } + c.Feed.Output.Enable = false + c.Feed.Input.URL = []string{} + } if err := c.BlockValidator.Validate(); err != nil { return err } From 640e3e62f55431dc952da262f4bf37d65f8f79bd Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 12:07:34 -0800 Subject: [PATCH 004/103] Use 4844-only contracts branch --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 0a149d2af9..798934bc56 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 0a149d2af9aee566c4abf493479ec15e5fc32d98 +Subproject commit 798934bc5601ed9926ad9e8744575ecc075e0902 From 312efbb74cabc3ca2d36dfe307d929fbda9b24da Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 12:10:26 -0800 Subject: [PATCH 005/103] Handle MaxTimeVariation contract change --- arbnode/batch_poster.go | 12 ++++++------ arbnode/node.go | 8 ++++---- system_tests/full_challenge_impl_test.go | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go 
index c4fc500d76..9b8089fbd6 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -921,22 +921,22 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, fmt.Errorf("error getting L1 bound block: %w", err) } - maxTimeVariation, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ + maxTimeVariationDelayBlocks, maxTimeVariationFutureBlocks, maxTimeVariationDelaySeconds, maxTimeVariationFutureSeconds, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ Context: ctx, BlockNumber: l1Bound.Number, }) if err != nil { // This might happen if the latest finalized block is old enough that our L1 node no longer has its state log.Warn("error getting max time variation on L1 bound block; falling back on latest block", "err", err) - maxTimeVariation, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) + maxTimeVariationDelayBlocks, maxTimeVariationFutureBlocks, maxTimeVariationDelaySeconds, maxTimeVariationFutureSeconds, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) if err != nil { return false, fmt.Errorf("error getting max time variation: %w", err) } } l1BoundBlockNumber := arbutil.ParentHeaderToL1BlockNumber(l1Bound) - l1BoundMaxBlockNumber = arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariation.FutureBlocks)) - l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariation.FutureSeconds)) + l1BoundMaxBlockNumber = arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariationFutureBlocks)) + l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariationFutureSeconds)) if config.L1BlockBoundBypass > 0 { latestHeader, err := b.l1Reader.LastHeader(ctx) @@ -947,8 +947,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime)) timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second)) - l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelayBlocks)) - l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelaySeconds)) + l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelayBlocks)) + l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds)) } } diff --git a/arbnode/node.go b/arbnode/node.go index f92dcefe7c..6119a4fb5e 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -65,10 +65,10 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com // TODO could the ChainConfig be just []byte? 
ChainConfig: string(serializedChainConfig), SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: big.NewInt(60 * 60 * 24 / 15), - FutureBlocks: big.NewInt(12), - DelaySeconds: big.NewInt(60 * 60 * 24), - FutureSeconds: big.NewInt(60 * 60), + DelayBlocks: 60 * 60 * 24 / 15, + FutureBlocks: 12, + DelaySeconds: 60 * 60 * 24, + FutureSeconds: 60 * 60, }, } } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 118d17ec81..ecbfd67c7a 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -200,10 +200,10 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: big.NewInt(10000), - FutureBlocks: big.NewInt(10000), - DelaySeconds: big.NewInt(10000), - FutureSeconds: big.NewInt(10000), + DelayBlocks: 10000, + FutureBlocks: 10000, + DelaySeconds: 10000, + FutureSeconds: 10000, } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, From b9fa60817f908b9b1692cd65ad659f0048534125 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 12:11:24 -0800 Subject: [PATCH 006/103] Handle TimeBounds move to Bridge --- arbnode/delayed_seq_reorg_test.go | 8 ++++---- arbnode/sequencer_inbox.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index a28eebb5dc..9ad984ae6c 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -61,7 +61,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{1}, AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), AfterDelayedCount: 1, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -77,7 +77,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{2}, AfterDelayedAcc: userDelayed.AfterInboxAcc(), AfterDelayedCount: 2, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -91,7 +91,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{3}, AfterDelayedAcc: userDelayed.AfterInboxAcc(), AfterDelayedCount: 2, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -130,7 +130,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{2}, AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), AfterDelayedCount: 1, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index 2adfcb60b3..d0cdebfeff 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -102,7 +102,7 @@ type SequencerInboxBatch struct { AfterInboxAcc common.Hash AfterDelayedAcc common.Hash AfterDelayedCount uint64 - TimeBounds bridgegen.ISequencerInboxTimeBounds + TimeBounds bridgegen.IBridgeTimeBounds rawLog types.Log dataLocation batchDataLocation bridgeAddress common.Address From 659553f0af167228212f54367cb546ae0b3cf661 Mon Sep 17 00:00:00 2001 From: Tristan 
Wilson Date: Thu, 18 Jan 2024 15:07:13 -0800 Subject: [PATCH 007/103] Placeholder zero addresses in DeploySequencerInbox --- deploy/deploy.go | 2 +- system_tests/full_challenge_impl_test.go | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/deploy/deploy.go b/deploy/deploy.go index bd2f2ec329..b1a3523cfd 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -40,7 +40,7 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, common.Address{}, common.Address{}) // TODO upload the DataHashReader and BlobBasefeeReader err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index ecbfd67c7a..c14f4c0d51 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -199,19 +199,13 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) - timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 10000, - FutureBlocks: 10000, - DelaySeconds: 10000, - FutureSeconds: 10000, - } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, l1Client, - bridgeAddr, l1Info.GetAddress("sequencer"), - timeBounds, big.NewInt(117964), + common.Address{}, // TODO addresses for DataHashReader and BlobBasefeeReader + common.Address{}, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 9e39151423eec4d66b175a8a87b9920a2feefdd2 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 18 Jan 2024 20:23:08 -0700 Subject: [PATCH 008/103] Get non-challenge tests passing --- Makefile | 1 + arbnode/batch_poster.go | 8 ++--- contracts | 2 +- deploy/deploy.go | 13 +++++++- solgen/gen.go | 38 ++++++++++++++++++++++++ system_tests/common_test.go | 7 ++++- system_tests/full_challenge_impl_test.go | 4 +-- 7 files changed, 63 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 4221100961..edc6c3a6d9 100644 --- a/Makefile +++ b/Makefile @@ -331,6 +331,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro .make/solidity: $(DEP_PREDICATE) contracts/src/*/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make yarn --cwd contracts build + yarn --cwd contracts build:forge:yul @touch $@ .make/yarndeps: $(DEP_PREDICATE) contracts/package.json contracts/yarn.lock $(ORDER_ONLY_PREDICATE) .make diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 9b8089fbd6..07034ee6f8 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -338,10 +338,7 @@ func AccessList(opts *AccessListOpts) types.AccessList { StorageKeys: []common.Hash{ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), // totalDelayedMessagesRead common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), // bridge - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // maxTimeVariation.delayBlocks - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // maxTimeVariation.futureBlocks - 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // maxTimeVariation.delaySeconds - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // maxTimeVariation.futureSeconds + common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), // maxTimeVariation // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of // "eip1967.proxy.admin" subtracted by 1. common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), @@ -427,7 +424,8 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) if shouldHalt { logLevel = log.Error } - logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) + txErr := arbutil.DetailTxError(ctx, b.l1Reader.Client(), tx, r) + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) return shouldHalt, nil } } diff --git a/contracts b/contracts index 798934bc56..b95ab08544 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 798934bc5601ed9926ad9e8744575ecc075e0902 +Subproject commit b95ab08544ae339c5ee7e7d708c9acb5e4ab1f75 diff --git a/deploy/deploy.go b/deploy/deploy.go index b1a3523cfd..94a8e81a25 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -16,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" + "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -40,7 +41,17 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, common.Address{}, common.Address{}) // TODO upload the DataHashReader and BlobBasefeeReader + dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("data hashes reader deploy error: %w", err) + } + blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) + } + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, dataHashesReader, blobBasefeeReader) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/solgen/gen.go b/solgen/gen.go index 5d43946fa5..770fa08571 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -23,6 +23,15 @@ type HardHatArtifact struct { Bytecode string `json:"bytecode"` } +type FoundryBytecode struct { + Object string `json:"object"` +} + +type FoundryArtifact struct { + Abi []interface{} `json:"abi"` + Bytecode FoundryBytecode `json:"bytecode"` +} + type moduleInfo struct { contractNames []string abis []string @@ -96,6 +105,35 @@ func main() { modInfo.addArtifact(artifact) } + yulFilePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "out", "yul", "*", "*.json")) + if err != nil { + log.Fatal(err) + } + yulModInfo := modules["yulgen"] + if yulModInfo == nil { + yulModInfo 
= &moduleInfo{} + modules["yulgen"] = yulModInfo + } + for _, path := range yulFilePaths { + _, file := filepath.Split(path) + name := file[:len(file)-5] + + data, err := os.ReadFile(path) + if err != nil { + log.Fatal("could not read", path, "for contract", name, err) + } + + artifact := FoundryArtifact{} + if err := json.Unmarshal(data, &artifact); err != nil { + log.Fatal("failed to parse contract", name, err) + } + yulModInfo.addArtifact(HardHatArtifact{ + ContractName: name, + Abi: artifact.Abi, + Bytecode: artifact.Bytecode.Object, + }) + } + // add upgrade executor module which is not compiled locally, but imported from 'nitro-contracts' depedencies upgExecutorPath := filepath.Join(parent, "contracts", "node_modules", "@offchainlabs", "upgrade-executor", "build", "contracts", "src", "UpgradeExecutor.sol", "UpgradeExecutor.json") _, err = os.Stat(upgExecutorPath) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 2e17a50ede..a950ebd7ca 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -43,6 +43,9 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" + "github.com/ethereum/go-ethereum/eth/tracers" + _ "github.com/ethereum/go-ethereum/eth/tracers/js" + _ "github.com/ethereum/go-ethereum/eth/tracers/native" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -474,9 +477,10 @@ func createStackConfigForTest(dataDir string) *node.Config { stackConf.DataDir = dataDir stackConf.UseLightweightKDF = true stackConf.WSPort = 0 + stackConf.WSModules = append(stackConf.WSModules, "eth", "debug") stackConf.HTTPPort = 0 stackConf.HTTPHost = "" - stackConf.HTTPModules = append(stackConf.HTTPModules, "eth") + stackConf.HTTPModules = append(stackConf.HTTPModules, "eth", "debug") stackConf.P2P.NoDiscovery = true stackConf.P2P.NoDial = true stackConf.P2P.ListenAddr = "" @@ -605,6 +609,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no Namespace: "eth", Service: filters.NewFilterAPI(filters.NewFilterSystem(l1backend.APIBackend, filters.Config{}), false), }}) + stack.RegisterAPIs(tracers.APIs(l1backend.APIBackend)) Require(t, stack.Start()) Require(t, l1backend.StartMining()) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index c14f4c0d51..1a7747092e 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -204,8 +204,8 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Client, l1Info.GetAddress("sequencer"), big.NewInt(117964), - common.Address{}, // TODO addresses for DataHashReader and BlobBasefeeReader - common.Address{}, + common.Address{1}, // TODO addresses for DataHashReader and BlobBasefeeReader + common.Address{1}, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 5e2dfea42614e7bdfa500ce21a7ec8ba29a5107a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 19 Jan 2024 01:20:54 -0700 Subject: [PATCH 009/103] Use new SequencerInboxStub to fix challenge tests --- contracts | 2 +- system_tests/full_challenge_impl_test.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/contracts b/contracts index b95ab08544..77ce30ee83 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit b95ab08544ae339c5ee7e7d708c9acb5e4ab1f75 +Subproject commit 
77ce30ee8393a7b489e42f7afdbe6f3966538e72 diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 1a7747092e..a2668d69e5 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -199,10 +199,18 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) + timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ + DelayBlocks: 10000, + FutureBlocks: 10000, + DelaySeconds: 10000, + FutureSeconds: 10000, + } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, l1Client, + bridgeAddr, l1Info.GetAddress("sequencer"), + timeBounds, big.NewInt(117964), common.Address{1}, // TODO addresses for DataHashReader and BlobBasefeeReader common.Address{1}, From 750a15f5f4505ef484b0539fdc86a292f144e3b3 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 19 Jan 2024 01:24:45 -0700 Subject: [PATCH 010/103] Use real blob data readers in challenge tests --- system_tests/full_challenge_impl_test.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index a2668d69e5..8dc9b83f32 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -35,6 +35,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" + "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" @@ -199,6 +200,14 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) + dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(&txOpts, l1Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Client, tx) + Require(t, err) + blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(&txOpts, l1Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Client, tx) + Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ DelayBlocks: 10000, FutureBlocks: 10000, @@ -212,8 +221,8 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Info.GetAddress("sequencer"), timeBounds, big.NewInt(117964), - common.Address{1}, // TODO addresses for DataHashReader and BlobBasefeeReader - common.Address{1}, + dataHashesReader, + blobBasefeeReader, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From cfd5fca27d57979c55fa6e8481d7608ccffaecc9 Mon Sep 17 00:00:00 2001 From: amsanghi Date: Fri, 19 Jan 2024 14:25:10 +0530 Subject: [PATCH 011/103] =?UTF-8?q?Add=20a=20CLI=20option=20for=20the=20RP?= =?UTF-8?q?C=20=E2=80=9Cfinalized=E2=80=9D=20block=20to=20wait=20for=20the?= =?UTF-8?q?=20block=20validator=20to=20complete?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- arbnode/sync_monitor.go | 47 ++++++++++++++++++++++++++++++++++----- staker/block_validator.go | 6 +++++ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 598ea4fb34..90621d9b5c 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -26,21 +26,27 @@ func NewSyncMonitor(config 
*SyncMonitorConfig) *SyncMonitor { } type SyncMonitorConfig struct { - BlockBuildLag uint64 `koanf:"block-build-lag"` - BlockBuildSequencerInboxLag uint64 `koanf:"block-build-sequencer-inbox-lag"` - CoordinatorMsgLag uint64 `koanf:"coordinator-msg-lag"` + BlockBuildLag uint64 `koanf:"block-build-lag"` + BlockBuildSequencerInboxLag uint64 `koanf:"block-build-sequencer-inbox-lag"` + CoordinatorMsgLag uint64 `koanf:"coordinator-msg-lag"` + SafeBlockWaitForBlockValidator bool `koanf:"safe-block-wait-for-block-validator"` + FinalizedBlockWaitForBlockValidator bool `koanf:"finalized-block-wait-for-block-validator"` } var DefaultSyncMonitorConfig = SyncMonitorConfig{ - BlockBuildLag: 20, - BlockBuildSequencerInboxLag: 0, - CoordinatorMsgLag: 15, + BlockBuildLag: 20, + BlockBuildSequencerInboxLag: 0, + CoordinatorMsgLag: 15, + SafeBlockWaitForBlockValidator: false, + FinalizedBlockWaitForBlockValidator: false, } func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".block-build-lag", DefaultSyncMonitorConfig.BlockBuildLag, "allowed lag between messages read and blocks built") f.Uint64(prefix+".block-build-sequencer-inbox-lag", DefaultSyncMonitorConfig.BlockBuildSequencerInboxLag, "allowed lag between messages read from sequencer inbox and blocks built") f.Uint64(prefix+".coordinator-msg-lag", DefaultSyncMonitorConfig.CoordinatorMsgLag, "allowed lag between local and remote messages") + f.Bool(prefix+".safe-block-wait-for-block-validator", DefaultSyncMonitorConfig.SafeBlockWaitForBlockValidator, "wait for block validator to complete before returning safe block number") + f.Bool(prefix+".finalized-block-wait-for-block-validator", DefaultSyncMonitorConfig.FinalizedBlockWaitForBlockValidator, "wait for block validator to complete before returning finalized block number") } func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator, exec execution.FullExecutionClient) { @@ -153,10 +159,29 @@ func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { if err != nil { return 0, err } + // If SafeBlockWaitForBlockValidator is true, we want to wait for the block validator to finish + if s.config.SafeBlockWaitForBlockValidator { + latestValidatedCount, err := s.getLatestValidatedCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } block := s.exec.MessageIndexToBlockNumber(msg - 1) return block, nil } +func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { + latestValidatedGS := s.txStreamer.validator.GetLastValidated() + count, err := s.inboxReader.tracker.GetBatchMessageCount(latestValidatedGS.Batch - 1) + if err != nil { + return 0, err + } + count += arbutil.MessageIndex(latestValidatedGS.PosInBatch) +} + func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { if s.inboxReader == nil || !s.initialized { return 0, errors.New("not set up for safeblock") @@ -165,6 +190,16 @@ func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) if err != nil { return 0, err } + // If FinalizedBlockWaitForBlockValidator is true, we want to wait for the block validator to finish + if s.config.FinalizedBlockWaitForBlockValidator { + latestValidatedCount, err := s.getLatestValidatedCount() + if err != nil { + return 0, err + } + if msg > latestValidatedCount { + msg = latestValidatedCount + } + } block := s.exec.MessageIndexToBlockNumber(msg - 1) return block, nil } diff --git 
a/staker/block_validator.go b/staker/block_validator.go index 352335a5db..a331126735 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1206,3 +1206,9 @@ func (v *BlockValidator) WaitForPos(t *testing.T, ctx context.Context, pos arbut } } } + +func (v *BlockValidator) GetLastValidated() validator.GoGlobalState { + v.reorgMutex.Lock() + defer v.reorgMutex.Unlock() + return v.lastValidGS +} From ecd379f524e860432345a67edfd55ef05b0e23f9 Mon Sep 17 00:00:00 2001 From: amsanghi Date: Fri, 19 Jan 2024 14:31:50 +0530 Subject: [PATCH 012/103] Fix --- arbnode/sync_monitor.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 90621d9b5c..5fa9dbabcf 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -180,6 +180,7 @@ func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { return 0, err } count += arbutil.MessageIndex(latestValidatedGS.PosInBatch) + return count, nil } func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { From 3619cd1a097e9f62de69a6acd970c38034a69120 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 19 Jan 2024 10:39:18 -0700 Subject: [PATCH 013/103] Update contracts to latest (unified 4844 reader contract) --- contracts | 2 +- deploy/deploy.go | 9 ++------- system_tests/full_challenge_impl_test.go | 9 ++------- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/contracts b/contracts index 77ce30ee83..a8e7709bfc 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 77ce30ee8393a7b489e42f7afdbe6f3966538e72 +Subproject commit a8e7709bfc918f9b8e2888d47f2fd8454779fd11 diff --git a/deploy/deploy.go b/deploy/deploy.go index 94a8e81a25..59760e2c21 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -41,17 +41,12 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("data hashes reader deploy error: %w", err) - } - blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(auth, client) + reader4844, tx, _, err := yulgen.DeployReader4844(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, dataHashesReader, blobBasefeeReader) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 8dc9b83f32..0fa483b6ea 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -200,11 +200,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) - dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(&txOpts, l1Client) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1Client, tx) - Require(t, err) - blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(&txOpts, l1Client) + reader4844, tx, _, err 
:= yulgen.DeployReader4844(&txOpts, l1Client) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) @@ -221,8 +217,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Info.GetAddress("sequencer"), timeBounds, big.NewInt(117964), - dataHashesReader, - blobBasefeeReader, + reader4844, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 02e2be04e5d3e0e415e2119f1fee5202b5490a3c Mon Sep 17 00:00:00 2001 From: amsanghi Date: Mon, 22 Jan 2024 20:25:30 +0530 Subject: [PATCH 014/103] Prefetch state needed for future block executions by executing them in parallel against old state --- arbnode/transaction_streamer.go | 10 ++++++- execution/gethexec/executionengine.go | 38 +++++++++++++++++++++++---- execution/gethexec/node.go | 4 +-- execution/gethexec/sequencer.go | 7 +++++ execution/interface.go | 2 +- 5 files changed, 52 insertions(+), 9 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 24ef2a7cc4..5491cbdbf2 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -968,7 +968,15 @@ func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec execution log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos) return false } - err = s.exec.DigestMessage(pos, msg) + var msgForPrefetch *arbostypes.MessageWithMetadata + if pos+1 < msgCount { + msgForPrefetch, err = s.GetMessage(pos + 1) + if err != nil { + log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos+1) + return false + } + } + err = s.exec.DigestMessage(pos, msg, msgForPrefetch) if err != nil { logger := log.Warn if prevMessageCount < msgCount { diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 58e91a197e..d376c59ba2 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -41,6 +41,8 @@ type ExecutionEngine struct { nextScheduledVersionCheck time.Time // protected by the createBlocksMutex reorgSequencing bool + + prefetchBlock bool } func NewExecutionEngine(bc *core.BlockChain) (*ExecutionEngine, error) { @@ -71,6 +73,16 @@ func (s *ExecutionEngine) EnableReorgSequencing() { s.reorgSequencing = true } +func (s *ExecutionEngine) EnablePrefetchBlock() { + if s.Started() { + panic("trying to enable prefetch block after start") + } + if s.prefetchBlock { + panic("trying to enable prefetch block when already set") + } + s.prefetchBlock = true +} + func (s *ExecutionEngine) SetTransactionStreamer(streamer execution.TransactionStreamer) { if s.Started() { panic("trying to set transaction streamer after start") @@ -107,7 +119,11 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost return err } for i := range newMessages { - err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i]) + var msgForPrefetch *arbostypes.MessageWithMetadata + if i < len(newMessages)-1 { + msgForPrefetch = &newMessages[i] + } + err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) if err != nil { return err } @@ -486,15 +502,15 @@ func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.Mess return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos))) } -func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg 
*arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { if !s.createBlocksMutex.TryLock() { return errors.New("createBlock mutex held") } defer s.createBlocksMutex.Unlock() - return s.digestMessageWithBlockMutex(num, msg) + return s.digestMessageWithBlockMutex(num, msg, msgForPrefetch) } -func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { currentHeader, err := s.getCurrentHeader() if err != nil { return err @@ -508,11 +524,23 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, } startTime := time.Now() + var wg sync.WaitGroup + if s.prefetchBlock && msgForPrefetch != nil { + wg.Add(1) + go func() { + defer wg.Done() + _, _, _, err := s.createBlockFromNextMessage(msgForPrefetch) + if err != nil { + return + } + }() + } + block, statedb, receipts, err := s.createBlockFromNextMessage(msg) if err != nil { return err } - + wg.Wait() err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) if err != nil { return err diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 00337cc355..1ad73febe7 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -311,8 +311,8 @@ func (n *ExecutionNode) StopAndWait() { // } } -func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { - return n.ExecEngine.DigestMessage(num, msg) +func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { + return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch) } func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { return n.ExecEngine.Reorg(count, newMessages, oldMessages) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 5db38cbb4d..9bc6f4378d 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -66,6 +66,7 @@ type SequencerConfig struct { MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` + EnablePrefetchBlock bool `koanf:"enable-prefetch-block"` } func (c *SequencerConfig) Validate() error { @@ -97,6 +98,7 @@ var DefaultSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, + EnablePrefetchBlock: false, } var TestSequencerConfig = SequencerConfig{ @@ -112,6 +114,7 @@ var TestSequencerConfig = SequencerConfig{ MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, + EnablePrefetchBlock: false, } func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -127,6 +130,7 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept") f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor") 
f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") + f.Bool(prefix+".enable-prefetch-block", DefaultSequencerConfig.EnablePrefetchBlock, "enable prefetching of blocks") } type txQueueItem struct { @@ -324,6 +328,9 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead } s.Pause() execEngine.EnableReorgSequencing() + if config.EnablePrefetchBlock { + execEngine.EnablePrefetchBlock() + } return s, nil } diff --git a/execution/interface.go b/execution/interface.go index ef9409b9c1..414c31f64e 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -28,7 +28,7 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // always needed type ExecutionClient interface { - DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error + DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error HeadMessageNumber() (arbutil.MessageIndex, error) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) From 574fb738c71290d3055df78ae01f80d7e57cccf2 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Jan 2024 08:54:16 -0800 Subject: [PATCH 015/103] go mod tidy after merging in geth-1.13 branch --- go.mod | 16 ++++++++-------- go.sum | 38 ++++++++++++++++++++------------------ 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index bdda6a61a1..f6f6bce80d 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 @@ -53,6 +53,7 @@ require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/DataDog/zstd v1.5.2 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect @@ -97,7 +98,7 @@ require ( github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -131,7 +132,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect - github.com/huin/goupnp v1.1.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format 
v0.1.1 // indirect @@ -307,13 +308,13 @@ require ( ) require ( - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect - github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-redis/redis/v8 v8.11.4 github.com/go-stack/stack v1.8.1 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect @@ -333,8 +334,7 @@ require ( github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff --git a/go.sum b/go.sum index bf8b4b826d..a0b83650a2 100644 --- a/go.sum +++ b/go.sum @@ -55,6 +55,8 @@ github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKz github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= @@ -63,8 +65,8 @@ github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fT github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= @@ -223,15 +225,15 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH 
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -305,8 +307,8 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -401,8 +403,8 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= 
-github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -638,8 +640,8 @@ github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZm github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= -github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -1598,10 +1600,10 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= -github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= @@ -1948,6 +1950,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1987,7 +1990,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2009,6 +2011,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2249,8 +2253,6 @@ gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= From fe654290f345218a632139787f63cc5e39275c92 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Jan 2024 17:46:13 -0800 Subject: [PATCH 016/103] Port BlobClient from old 4844 branch This ports BlobClient from the eip-4844-experimental branch, with the prysm dependency removed (relevant code copied to util/beaconclient) and the kZGToVersionedHash function copied from geth rather than modifying our fork to make it public as I had done before, since it is so simple. "A little copying is better than a little dependency." 
- Rob Pike, Go Proverbs --- arbnode/blob_reader.go | 185 +++++++++++++++++++++++++++++++++++ go.mod | 2 +- util/beaconclient/client.go | 98 +++++++++++++++++++ util/beaconclient/errors.go | 40 ++++++++ util/beaconclient/options.go | 48 +++++++++ 5 files changed, 372 insertions(+), 1 deletion(-) create mode 100644 arbnode/blob_reader.go create mode 100644 util/beaconclient/client.go create mode 100644 util/beaconclient/errors.go create mode 100644 util/beaconclient/options.go diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go new file mode 100644 index 0000000000..673df37b1f --- /dev/null +++ b/arbnode/blob_reader.go @@ -0,0 +1,185 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbnode + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/beaconclient" + "github.com/offchainlabs/nitro/util/pretty" + "github.com/pkg/errors" + + "github.com/spf13/pflag" +) + +type BlobClient struct { + bc *beaconclient.Client + ec arbutil.L1Interface + + // The genesis time time won't change so only request it once. + cachedGenesisTime uint64 +} + +type BlobClientConfig struct { + BeaconChainUrl string `koanf:"beacon-chain-url"` +} + +var DefaultBlobClientConfig = BlobClientConfig{ + BeaconChainUrl: "", +} + +func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { + f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") +} + +func NewBlobClient(bc *beaconclient.Client, ec arbutil.L1Interface) *BlobClient { + return &BlobClient{bc: bc, ec: ec} +} + +// Get all the blobs associated with a particular block. 
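A rough usage sketch (editorial, not part of this commit) of how the new reader is meant to be wired: a beaconclient.Client pointed at a consensus-layer endpoint, wrapped in a BlobClient together with an execution-layer client, then queried with the L1 block hash of the batch and the versioned hashes taken from the batch payload. The endpoint URLs are placeholders, and using an *ethclient.Client as the arbutil.L1Interface argument is an assumption of the sketch.

    // Editorial sketch only; see the GetBlobs implementation just below.
    package main

    import (
        "context"
        "log"
        "time"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethclient"

        "github.com/offchainlabs/nitro/arbnode"
        "github.com/offchainlabs/nitro/util/beaconclient"
    )

    func fetchBatchBlobs(ctx context.Context, batchBlockHash common.Hash, versionedHashes []common.Hash) error {
        l1Client, err := ethclient.Dial("http://localhost:8545") // execution-layer RPC (placeholder)
        if err != nil {
            return err
        }
        bc, err := beaconclient.NewClient("http://localhost:5052", beaconclient.WithTimeout(10*time.Second)) // beacon REST API (placeholder)
        if err != nil {
            return err
        }
        blobReader := arbnode.NewBlobClient(bc, l1Client)
        // batchBlockHash is the hash of the L1 block holding the batch tx;
        // versionedHashes come from the parsed blob batch payload.
        blobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes)
        if err != nil {
            return err
        }
        log.Printf("fetched %d blobs", len(blobs))
        return nil
    }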
+func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { + header, err := b.ec.HeaderByHash(ctx, blockHash) + if err != nil { + return nil, err + } + + genesisTime, err := b.genesisTime(ctx) + if err != nil { + return nil, err + } + + // TODO make denominator configurable for devnets with faster block time + slot := (header.Time - genesisTime) / 12 + + return b.blobSidecars(ctx, slot, versionedHashes) +} + +type blobResponse struct { + Data []blobResponseItem `json:"data"` +} +type blobResponseItem struct { + BlockRoot string `json:"block_root"` + Index int `json:"index"` + Slot uint64 `json:"slot"` + BlockParentRoot string `json:"block_parent_root"` + ProposerIndex uint64 `json:"proposer_index"` + Blob string `json:"blob"` + KzgCommitment string `json:"kzg_commitment"` + KzgProof string `json:"kzg_proof"` +} + +func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { + body, err := b.bc.Get(ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + if err != nil { + return nil, errors.Wrap(err, "error calling beacon client in blobSidecars") + } + + br := &blobResponse{} + err = json.Unmarshal(body, br) + if err != nil { + return nil, errors.Wrap(err, "error decoding json response in blobSidecars") + } + + if len(br.Data) == 0 { + return nil, fmt.Errorf("no blobs found for slot %d", slot) + } + + blobs := make([]kzg4844.Blob, len(versionedHashes)) + var totalFound int + + for i := range blobs { + commitmentBytes, err := hexutil.Decode(br.Data[i].KzgCommitment) + if err != nil { + return nil, fmt.Errorf("couldn't decode commitment for slot(%d) at index(%d), commitment(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgCommitment)) + } + var commitment kzg4844.Commitment + copy(commitment[:], commitmentBytes) + versionedHash := kZGToVersionedHash(commitment) + + // The versioned hashes of the blob commitments are produced in the by HASH_OPCODE_BYTE, + // presumably in the order they were added to the tx. The spec is unclear if the blobs + // need to be returned in any particular order from the beacon API, so we put them back in + // the order from the tx. 
+ var j int + var found bool + for j = range versionedHashes { + if versionedHashes[j] == versionedHash { + found = true + totalFound++ + break + } + } + if !found { + continue + } + + blob, err := hexutil.Decode(br.Data[i].Blob) + if err != nil { + return nil, fmt.Errorf("couldn't decode blob for slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) + } + copy(blobs[j][:], blob) + + proofBytes, err := hexutil.Decode(br.Data[i].KzgProof) + if err != nil { + return nil, fmt.Errorf("couldn't decode proof for slot(%d) at index(%d), proof(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgProof)) + } + var proof kzg4844.Proof + copy(proof[:], proofBytes) + + err = kzg4844.VerifyBlobProof(blobs[j], commitment, proof) + if err != nil { + return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) + } + } + + if totalFound < len(versionedHashes) { + return nil, fmt.Errorf("not all of the requested blobs (%d/%d) were found at slot (%d), can't reconstruct batch payload", totalFound, len(versionedHashes), slot) + } + + return blobs, nil +} + +type genesisResponse struct { + GenesisTime uint64 `json:"genesis_time"` + // don't currently care about other fields, add if needed +} + +func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { + if b.cachedGenesisTime > 0 { + return b.cachedGenesisTime, nil + } + + body, err := b.bc.Get(ctx, "/eth/v1/beacon/genesis") + if err != nil { + return 0, errors.Wrap(err, "error calling beacon client in genesisTime") + } + + gr := &genesisResponse{} + dataWrapper := &struct{ Data *genesisResponse }{Data: gr} + err = json.Unmarshal(body, dataWrapper) + if err != nil { + return 0, errors.Wrap(err, "error decoding json response in genesisTime") + } + + return gr.GenesisTime, nil +} + +// The following code is taken from core/vm/contracts.go +const ( + blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile. +) + +func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { + h := sha256.Sum256(kzg[:]) + h[0] = blobCommitmentVersionKZG + + return h +} diff --git a/go.mod b/go.mod index f6f6bce80d..69cbcd9884 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multihash v0.2.1 + github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 @@ -233,7 +234,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect diff --git a/util/beaconclient/client.go b/util/beaconclient/client.go new file mode 100644 index 0000000000..e2dfd8e6bf --- /dev/null +++ b/util/beaconclient/client.go @@ -0,0 +1,98 @@ +package beaconclient + +import ( + "context" + "io" + "net" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +// Client is a wrapper object around the HTTP client. +// Taken from prysm/api/client. 
+type Client struct { + hc *http.Client + baseURL *url.URL + token string +} + +// NewClient constructs a new client with the provided options (ex WithTimeout). +// `host` is the base host + port used to construct request urls. This value can be +// a URL string, or NewClient will assume an http endpoint if just `host:port` is used. +func NewClient(host string, opts ...ClientOpt) (*Client, error) { + u, err := urlForHost(host) + if err != nil { + return nil, err + } + c := &Client{ + hc: &http.Client{}, + baseURL: u, + } + for _, o := range opts { + o(c) + } + return c, nil +} + +// Token returns the bearer token used for jwt authentication +func (c *Client) Token() string { + return c.token +} + +// BaseURL returns the base url of the client +func (c *Client) BaseURL() *url.URL { + return c.baseURL +} + +// Do execute the request against the http client +func (c *Client) Do(req *http.Request) (*http.Response, error) { + return c.hc.Do(req) +} + +func urlForHost(h string) (*url.URL, error) { + // try to parse as url (being permissive) + u, err := url.Parse(h) + if err == nil && u.Host != "" { + return u, nil + } + // try to parse as host:port + host, port, err := net.SplitHostPort(h) + if err != nil { + return nil, ErrMalformedHostname + } + return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil +} + +// NodeURL returns a human-readable string representation of the beacon node base url. +func (c *Client) NodeURL() string { + return c.baseURL.String() +} + +// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package. +func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) { + u := c.baseURL.ResolveReference(&url.URL{Path: path}) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + for _, o := range opts { + o(req) + } + r, err := c.hc.Do(req) + if err != nil { + return nil, err + } + defer func() { + err = r.Body.Close() + }() + if r.StatusCode != http.StatusOK { + return nil, Non200Err(r) + } + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, errors.Wrap(err, "error reading http response body") + } + return b, nil +} diff --git a/util/beaconclient/errors.go b/util/beaconclient/errors.go new file mode 100644 index 0000000000..7ee88805cd --- /dev/null +++ b/util/beaconclient/errors.go @@ -0,0 +1,40 @@ +package beaconclient + +import ( + "fmt" + "io" + "net/http" + + "github.com/pkg/errors" +) + +// ErrMalformedHostname is used to indicate if a host name's format is incorrect. +var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500") + +// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code. +// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK. +var ErrNotOK = errors.New("did not receive 2xx response from API") + +// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API. +var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API") + +// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized. +var ErrInvalidNodeVersion = errors.New("invalid node version response") + +// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error. 
+func Non200Err(response *http.Response) error { + bodyBytes, err := io.ReadAll(response.Body) + var body string + if err != nil { + body = "(Unable to read response body.)" + } else { + body = "response body:\n" + string(bodyBytes) + } + msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body) + switch response.StatusCode { + case 404: + return errors.Wrap(ErrNotFound, msg) + default: + return errors.Wrap(ErrNotOK, msg) + } +} diff --git a/util/beaconclient/options.go b/util/beaconclient/options.go new file mode 100644 index 0000000000..98a37e17a0 --- /dev/null +++ b/util/beaconclient/options.go @@ -0,0 +1,48 @@ +package beaconclient + +import ( + "fmt" + "net/http" + "time" +) + +// ReqOption is a request functional option. +type ReqOption func(*http.Request) + +// WithSSZEncoding is a request functional option that adds SSZ encoding header. +func WithSSZEncoding() ReqOption { + return func(req *http.Request) { + req.Header.Set("Accept", "application/octet-stream") + } +} + +// WithAuthorizationToken is a request functional option that adds header for authorization token. +func WithAuthorizationToken(token string) ReqOption { + return func(req *http.Request) { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + } +} + +// ClientOpt is a functional option for the Client type (http.Client wrapper) +type ClientOpt func(*Client) + +// WithTimeout sets the .Timeout attribute of the wrapped http.Client. +func WithTimeout(timeout time.Duration) ClientOpt { + return func(c *Client) { + c.hc.Timeout = timeout + } +} + +// WithRoundTripper replaces the underlying HTTP's transport with a custom one. +func WithRoundTripper(t http.RoundTripper) ClientOpt { + return func(c *Client) { + c.hc.Transport = t + } +} + +// WithAuthenticationToken sets an oauth token to be used. 
+func WithAuthenticationToken(token string) ClientOpt { + return func(c *Client) { + c.token = token + } +} From 11e61aafd6b41623e5989cd64dd46ad97cfc1a9a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:07:46 -0700 Subject: [PATCH 017/103] Implement 4844 blob reading in replay binary --- arbnode/delayed_seq_reorg_test.go | 2 +- arbnode/inbox_tracker.go | 14 ++++-- arbnode/node.go | 16 +++++- arbstate/das_reader.go | 7 +++ arbstate/inbox.go | 49 ++++++++++++++++-- arbstate/inbox_fuzz_test.go | 9 ++-- cmd/pruning/pruning.go | 2 +- cmd/replay/main.go | 32 ++++++++++-- staker/stateless_block_validator.go | 4 ++ system_tests/state_fuzz_test.go | 8 +-- util/blobs/blobs.go | 77 +++++++++++++++++++++++++++++ 11 files changed, 196 insertions(+), 24 deletions(-) create mode 100644 util/blobs/blobs.go diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index 9ad984ae6c..beb2656e2b 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil) + tracker, err := NewInboxTracker(db, streamer, nil, nil) Require(t, err) err = streamer.Start(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 51f74cbeb4..eaf863bffc 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -38,12 +38,13 @@ type InboxTracker struct { mutex sync.Mutex validator *staker.BlockValidator das arbstate.DataAvailabilityReader + blobReader arbstate.BlobReader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader) (*InboxTracker, error) { // We support a nil txStreamer for the pruning code if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { return nil, errors.New("data availability service required but unconfigured") @@ -52,6 +53,7 @@ func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arb db: db, txStreamer: txStreamer, das: das, + blobReader: blobReader, batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil @@ -504,11 +506,12 @@ type multiplexerBackend struct { inbox *InboxTracker } -func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, error) { +func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if len(b.batches) == 0 { - return nil, errors.New("read past end of specified sequencer batches") + return nil, common.Hash{}, errors.New("read past end of specified sequencer batches") } - return b.batches[0].Serialize(b.ctx, b.client) + bytes, err := b.batches[0].Serialize(b.ctx, b.client) + return bytes, b.batches[0].BlockHash, err } func (b *multiplexerBackend) GetSequencerInboxPosition() uint64 { @@ -603,7 +606,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, arbstate.KeysetValidate) + + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.blobReader, 
arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index 6119a4fb5e..99ecb541ee 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -39,6 +40,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" + "github.com/offchainlabs/nitro/util/beaconclient" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" @@ -85,6 +87,7 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + BlobClient BlobClientConfig `koanf:"blob-client"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` @@ -142,6 +145,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) + BlobClientAddOptions(prefix+".blob-client", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) @@ -512,7 +516,17 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader) + var blobReader arbstate.BlobReader + if config.BlobClient.BeaconChainUrl != "" { + bc, err := beaconclient.NewClient(config.BlobClient.BeaconChainUrl) + if err != nil { + return nil, err + } + + blobReader = NewBlobClient(bc, l1client) + } + + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) if err != nil { return nil, err } diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index a6d351b49e..5f568605b1 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -40,6 +40,9 @@ const L1AuthenticatedMessageHeaderFlag byte = 0x40 // ZeroheavyMessageHeaderFlag indicates that this message is zeroheavy-encoded. const ZeroheavyMessageHeaderFlag byte = 0x20 +// BlobHashesHeaderFlag indicates that this message contains EIP 4844 versioned hashes of the committments calculated over the blob data for the batch data. +const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x50 + // BrotliMessageHeaderByte indicates that the message is brotli-compressed. 
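An editorial note on the wire format introduced by this flag (not part of the commit text): the calldata of a blob batch carries only the 0x50 header byte followed by the 32-byte versioned hashes of the blob commitments, while the batch bytes themselves travel in the blobs, packed 31 bytes per 32-byte field element (the high byte is left zero so each element stays below the BLS12-381 scalar modulus) and RLP-framed by the util/blobs helpers added later in this same commit. A minimal construction of the payload portion (the bytes after the 40-byte sequencer message header) might look like:

    // Editorial sketch only; the helper name is illustrative.
    package main

    import (
        "github.com/ethereum/go-ethereum/common"

        "github.com/offchainlabs/nitro/arbstate"
    )

    func buildBlobBatchPayload(versionedHashes []common.Hash) []byte {
        payload := []byte{arbstate.BlobHashesHeaderFlag} // 0x50
        for _, h := range versionedHashes {
            payload = append(payload, h[:]...)
        }
        return payload
    }

parseSequencerMessage in arbstate/inbox.go (further down in this commit) reverses this: it checks IsBlobHashesHeaderByte on the first payload byte, splits the remainder into 32-byte hashes, fetches the blobs through the configured BlobReader, and decodes them with blobs.DecodeBlobs.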
const BrotliMessageHeaderByte byte = 0 @@ -55,6 +58,10 @@ func IsZeroheavyEncodedHeaderByte(header byte) bool { return (ZeroheavyMessageHeaderFlag & header) > 0 } +func IsBlobHashesHeaderByte(header byte) bool { + return (BlobHashesHeaderFlag & header) > 0 +} + func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 3995bcf308..cf8f61e97a 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -8,11 +8,13 @@ import ( "context" "encoding/binary" "errors" + "fmt" "io" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -21,11 +23,12 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/zeroheavy" ) type InboxBackend interface { - PeekSequencerInbox() ([]byte, error) + PeekSequencerInbox() ([]byte, common.Hash, error) GetSequencerInboxPosition() uint64 AdvanceSequencerInbox() @@ -36,6 +39,14 @@ type InboxBackend interface { ReadDelayedInbox(seqNum uint64) (*arbostypes.L1IncomingMessage, error) } +type BlobReader interface { + GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, + ) ([]kzg4844.Blob, error) +} + type sequencerMessage struct { minTimestamp uint64 maxTimestamp uint64 @@ -50,7 +61,7 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -79,6 +90,31 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da } } + if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { + blobHashes := payload[1:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + + if blobReader == nil { + return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") + } + + kzgBlobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err = blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return parsedMsg, nil + } + } + if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) if err != nil { @@ -242,6 +278,7 @@ type inboxMultiplexer 
struct { backend InboxBackend delayedMessagesRead uint64 dasReader DataAvailabilityReader + blobReader BlobReader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -251,11 +288,12 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, dasReader: dasReader, + blobReader: blobReader, keysetValidationMode: keysetValidationMode, } } @@ -270,13 +308,14 @@ const BatchSegmentKindAdvanceL1BlockNumber uint8 = 4 // Note: this does *not* return parse errors, those are transformed into invalid messages func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMetadata, error) { if r.cachedSequencerMessage == nil { - bytes, realErr := r.backend.PeekSequencerInbox() + // Note: batchBlockHash will be zero in the replay binary, but that's fine + bytes, batchBlockHash, realErr := r.backend.PeekSequencerInbox() if realErr != nil { return nil, realErr } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, bytes, r.dasReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dasReader, r.blobReader, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index fcb80cbd73..dcf43fd0da 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -9,6 +9,7 @@ import ( "errors" "testing" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/arbostypes" ) @@ -19,11 +20,11 @@ type multiplexerBackend struct { positionWithinMessage uint64 } -func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, error) { +func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if b.batchSeqNum != 0 { - return nil, errors.New("reading unknown sequencer batch") + return nil, common.Hash{}, errors.New("reading unknown sequencer batch") } - return b.batch, nil + return b.batch, common.Hash{}, nil } func (b *multiplexerBackend) GetSequencerInboxPosition() uint64 { @@ -66,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 68d89302f0..da015ac52c 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, nil) if err != nil { return nil, err } diff --git a/cmd/replay/main.go 
b/cmd/replay/main.go index 2fb13ceed8..b634a2d5bb 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -62,11 +63,12 @@ func (c WavmChainContext) GetHeader(hash common.Hash, num uint64) *types.Header type WavmInbox struct{} -func (i WavmInbox) PeekSequencerInbox() ([]byte, error) { +func (i WavmInbox) PeekSequencerInbox() ([]byte, common.Hash, error) { pos := wavmio.GetInboxPosition() res := wavmio.ReadInboxMessage(pos) log.Info("PeekSequencerInbox", "pos", pos, "res[:8]", res[:8]) - return res, nil + // Our BlobPreimageReader doesn't need the block hash + return res, common.Hash{}, nil } func (i WavmInbox) GetSequencerInboxPosition() uint64 { @@ -117,6 +119,30 @@ func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbst return arbstate.DiscardImmediately, nil } +type BlobPreimageReader struct { +} + +func (r *BlobPreimageReader) GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, +) ([]kzg4844.Blob, error) { + var blobs []kzg4844.Blob + for _, h := range versionedHashes { + var blob kzg4844.Blob + var preimage []byte + if true { + panic("TODO: fill in preimage with wavmio.ResolvePreimage(h, wavmio.PreimageTypeEthVersionedHash) once KZG proof support is merged into this branch") + } + if len(preimage) != len(blob) { + return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) + } + copy(blob[:], preimage) + blobs = append(blobs, blob) + } + return blobs, nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -180,7 +206,7 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, &BlobPreimageReader{}, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index acd86f8627..d35304be27 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -284,6 +284,10 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } + if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { + // TODO: fetch blob preimages + panic("TODO: fetch blob preimages") + } if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { continue } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index b14215fbf0..28bcbec9b4 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, 
arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -69,11 +69,11 @@ type inboxBackend struct { delayedMessages [][]byte } -func (b *inboxBackend) PeekSequencerInbox() ([]byte, error) { +func (b *inboxBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if len(b.batches) == 0 { - return nil, errors.New("read past end of specified sequencer batches") + return nil, common.Hash{}, errors.New("read past end of specified sequencer batches") } - return b.batches[0], nil + return b.batches[0], common.Hash{}, nil } func (b *inboxBackend) GetSequencerInboxPosition() uint64 { diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go new file mode 100644 index 0000000000..c8025dc253 --- /dev/null +++ b/util/blobs/blobs.go @@ -0,0 +1,77 @@ +// Copyright 2023-2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package blobs + +import ( + "crypto/sha256" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +// EncodeBlobs takes in raw bytes data to convert into blobs used for KZG commitment EIP-4844 +// transactions on Ethereum. +func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) { + data, err := rlp.EncodeToBytes(data) + if err != nil { + return nil, err + } + blobs := []kzg4844.Blob{{}} + blobIndex := 0 + fieldIndex := -1 + for i := 0; i < len(data); i += 31 { + fieldIndex++ + if fieldIndex == params.BlobTxFieldElementsPerBlob { + blobs = append(blobs, kzg4844.Blob{}) + blobIndex++ + fieldIndex = 0 + } + max := i + 31 + if max > len(data) { + max = len(data) + } + copy(blobs[blobIndex][fieldIndex*32+1:], data[i:max]) + } + return blobs, nil +} + +// DecodeBlobs decodes blobs into the batch data encoded in them. +func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { + var rlpData []byte + for _, blob := range blobs { + for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ { + rlpData = append(rlpData, blob[fieldIndex*32+1:(fieldIndex+1)*32]...) + } + } + var outputData []byte + err := rlp.DecodeBytes(rlpData, &outputData) + return outputData, err +} + +// Return KZG commitments, proofs, and versioned hashes that corresponds to these blobs +func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) { + commitments := make([]kzg4844.Commitment, len(blobs)) + proofs := make([]kzg4844.Proof, len(blobs)) + versionedHashes := make([]common.Hash, len(blobs)) + + for i := range blobs { + var err error + commitments[i], err = kzg4844.BlobToCommitment(blobs[i]) + if err != nil { + return nil, nil, nil, err + } + proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) + if err != nil { + return nil, nil, nil, err + } + // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. 
+ hash := sha256.Sum256(commitments[i][:]) + hash[0] = 1 + versionedHashes[i] = hash + } + + return commitments, proofs, versionedHashes, nil +} From 511480e86ba87752b7d06977f6fafee447e9f5e0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:14:21 -0700 Subject: [PATCH 018/103] Add foundry to CI --- .github/workflows/arbitrator-ci.yml | 5 +++++ .github/workflows/ci.yml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 8c491a421c..96080831c4 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -96,6 +96,11 @@ jobs: make -j make install + - name: Install foundry + run: | + curl -L https://foundry.paradigm.xyz | bash + foundryup + - name: Cache cbrotli uses: actions/cache@v3 id: cache-cbrotli diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f2c4fac84c..7ebc3b8734 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,6 +58,11 @@ jobs: with: targets: 'wasm32-unknown-unknown, wasm32-wasi' + - name: Install foundry + run: | + curl -L https://foundry.paradigm.xyz | bash + foundryup + - name: Cache Build Products uses: actions/cache@v3 with: From 5377d0f8e527240bd24b567c106945c163502b34 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:21:04 -0700 Subject: [PATCH 019/103] Install foundry in docker and fix CI --- .github/workflows/arbitrator-ci.yml | 6 ++---- .github/workflows/ci.yml | 6 ++---- Dockerfile | 5 +++-- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 96080831c4..571f3ca983 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -96,10 +96,8 @@ jobs: make -j make install - - name: Install foundry - run: | - curl -L https://foundry.paradigm.xyz | bash - foundryup + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 - name: Cache cbrotli uses: actions/cache@v3 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7ebc3b8734..fbf00bcb2d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,10 +58,8 @@ jobs: with: targets: 'wasm32-unknown-unknown, wasm32-wasi' - - name: Install foundry - run: | - curl -L https://foundry.paradigm.xyz | bash - foundryup + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 - name: Cache Build Products uses: actions/cache@v3 diff --git a/Dockerfile b/Dockerfile index b62e569259..ea32941903 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,13 +26,14 @@ COPY --from=brotli-library-builder /workspace/install/ / FROM node:16-bullseye-slim as contracts-builder RUN apt-get update && \ - apt-get install -y git python3 make g++ + apt-get install -y git python3 make g++ curl +RUN curl -L https://foundry.paradigm.xyz | bash && . ~/.bashrc && ~/.foundry/bin/foundryup WORKDIR /workspace COPY contracts/package.json contracts/yarn.lock contracts/ RUN cd contracts && yarn install COPY contracts contracts/ COPY Makefile . -RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity +RUN . 
~/.bashrc && NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity FROM debian:bullseye-20211220 as wasm-base WORKDIR /workspace From 6cca7042da059ddfbec16ff35a52139f424fc47e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:32:34 -0700 Subject: [PATCH 020/103] Fix docker build --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index ea32941903..173c0ff2a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -184,6 +184,7 @@ COPY fastcache/go.mod fastcache/go.sum fastcache/ RUN go mod download COPY . ./ COPY --from=contracts-builder workspace/contracts/build/ contracts/build/ +COPY --from=contracts-builder workspace/contracts/out/ contracts/out/ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/ COPY --from=contracts-builder workspace/.make/ .make/ COPY --from=prover-header-export / target/ From 609b4cf43a2279703282cdd779c6ac01fa106868 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 23:59:07 -0700 Subject: [PATCH 021/103] Add 4844 blob tx support to data poster (rbf is wip) --- arbnode/batch_poster.go | 1 + arbnode/dataposter/data_poster.go | 178 ++++++++++++++++++++------ arbnode/dataposter/storage/storage.go | 10 +- arbnode/dataposter/storage_test.go | 2 +- staker/validatorwallet/contract.go | 7 +- staker/validatorwallet/eoa.go | 3 +- 6 files changed, 152 insertions(+), 49 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 07034ee6f8..01a84b1c43 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -1071,6 +1071,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) data, gasLimit, new(big.Int), + nil, // TODO: use blobs accessList, ) if err != nil { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 09f3e218b1..425dba8e18 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -23,7 +23,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -31,12 +33,14 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" + "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -64,6 +68,7 @@ type DataPoster struct { metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) extraBacklog func() uint64 parentChainID *big.Int + parentChainID256 *uint256.Int // These fields are protected by the mutex. 
// TODO: factor out these fields into separate structure, since now one @@ -177,6 +182,11 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro extraBacklog: opts.ExtraBacklog, parentChainID: opts.ParentChainID, } + var overflow bool + dp.parentChainID256, overflow = uint256.FromBig(opts.ParentChainID) + if overflow { + return nil, fmt.Errorf("parent chain ID %v overflows uint256 (necessary for blob transactions)", opts.ParentChainID) + } if dp.extraBacklog == nil { dp.extraBacklog = func() uint64 { return 0 } } @@ -363,7 +373,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { - nextNonce := lastQueueItem.Data.Nonce + 1 + nextNonce := lastQueueItem.FullTx.Nonce() + 1 if err := p.canPostWithNonce(ctx, nextNonce); err != nil { return 0, nil, false, err } @@ -442,27 +452,34 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur var big4 = big.NewInt(4) // The dataPosterBacklog argument should *not* include extraBacklog (it's added in in this function) -func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs int, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { config := p.config() dataPosterBacklog += p.extraBacklog() latestHeader, err := p.headerReader.LastHeader(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if latestHeader.BaseFee == nil { - return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) + return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) + } + newBlobFeeCap := big.NewInt(0) + if latestHeader.ExcessBlobGas != nil { + newBlobFeeCap = eip4844.CalcBlobFee(*latestHeader.ExcessBlobGas) + newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) + } else if numBlobs > 0 { + return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing ExcessBlobGas but blobs were specified in data poster transaction (either the parent chain node is not synced or EIP-4844 was improperly activated)", latestHeader.Number) } softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) softConfNonce, err := p.client.NonceAt(ctx, p.Sender(), softConfBlock) if err != nil { - return nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) + return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } - newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2)) + newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, common.Big2) newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) newTipCap, err := p.client.SuggestGasTipCap(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, err } newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei)) 
newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei)) @@ -481,10 +498,13 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u newFeeCap = arbmath.BigMax(newFeeCap, arbmath.BigMulByBips(lastFeeCap, minRbfIncrease)) } + // TODO: if we're significantly increasing the blob fee cap, we also need to increase the fee cap my minRbfIncrease + // TODO: look more into geth's blob mempool and make sure this behavior conforms (I think minRbfIncrease might be higher there) + elapsed := time.Since(dataCreatedAt) maxFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if arbmath.BigGreaterThan(newFeeCap, maxFeeCap) { log.Warn( @@ -496,6 +516,8 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u newFeeCap = maxFeeCap } + // TODO: also have an expression limiting the max blob fee cap + latestBalance := p.balance balanceForTx := new(big.Int).Set(latestBalance) if config.AllocateMempoolBalance && !p.usingNoOpStorage { @@ -525,6 +547,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) } } + // TODO: take into account blob costs balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { log.Warn( @@ -550,10 +573,14 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u newTipCap = new(big.Int).Set(newFeeCap) } - return newFeeCap, newTipCap, nil + return newFeeCap, newTipCap, newBlobFeeCap, nil } -func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, accessList types.AccessList) (*types.Transaction, error) { +func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { + return p.PostTransaction(ctx, time.Now(), nonce, nil, to, calldata, gasLimit, value, nil, nil) +} + +func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, kzgBlobs []kzg4844.Blob, accessList types.AccessList) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() @@ -570,27 +597,65 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, nil, nil, dataCreatedAt, 0) + feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, len(kzgBlobs), nil, nil, dataCreatedAt, 0) if err != nil { return nil, err } - inner := types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: tipCap, - GasFeeCap: feeCap, - Gas: gasLimit, - To: &to, - Value: value, - Data: calldata, - AccessList: accessList, - ChainID: p.parentChainID, - } - fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(&inner)) + + var deprecatedData types.DynamicFeeTx + var inner types.TxData + if len(kzgBlobs) > 0 { + value256, overflow := uint256.FromBig(value) + if overflow { + return nil, fmt.Errorf("blob transaction callvalue %v overflows uint256", value) + } + // Intentionally break out of date data poster redis clients, + // so they don't try to replace by fee a tx they don't 
understand + deprecatedData.Nonce = ^uint64(0) + commitments, proofs, blobHashes, err := blobs.ComputeCommitmentsProofsAndHashes(kzgBlobs) + if err != nil { + return nil, fmt.Errorf("failed to compute KZG metadata: %w", err) + } + inner = &types.BlobTx{ + Nonce: nonce, + Gas: gasLimit, + To: to, + Value: value256, + Data: calldata, + Sidecar: &types.BlobTxSidecar{ + Blobs: kzgBlobs, + Commitments: commitments, + Proofs: proofs, + }, + BlobHashes: blobHashes, + AccessList: accessList, + ChainID: p.parentChainID256, + } + // reuse the code to convert gas fee and tip caps to uint256s + inner, err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap) + if err != nil { + return nil, err + } + } else { + deprecatedData = types.DynamicFeeTx{ + Nonce: nonce, + GasFeeCap: feeCap, + GasTipCap: tipCap, + Gas: gasLimit, + To: &to, + Value: value, + Data: calldata, + AccessList: accessList, + ChainID: p.parentChainID, + } + inner = &deprecatedData + } + fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(inner)) if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } queuedTx := storage.QueuedTransaction{ - Data: inner, + DeprecatedData: deprecatedData, FullTx: fullTx, Meta: meta, Sent: false, @@ -603,8 +668,8 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim // the mutex must be held by the caller func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTransaction) error { if prevTx != nil { - if prevTx.Data.Nonce != newTx.Data.Nonce { - return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) + if prevTx.FullTx.Nonce() != newTx.FullTx.Nonce() { + return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.FullTx.Nonce(), newTx.FullTx.Nonce()) } // Check if prevTx is the same as newTx and we don't need to do anything @@ -621,7 +686,7 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr return nil } } - if err := p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx); err != nil { + if err := p.queue.Put(ctx, newTx.FullTx.Nonce(), prevTx, newTx); err != nil { return fmt.Errorf("putting new tx in the queue: %w", err) } return nil @@ -645,22 +710,57 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return p.saveTx(ctx, newTx, &newerTx) } +func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (types.TxData, error) { + switch data := data.(type) { + case *types.DynamicFeeTx: + data.GasFeeCap = newFeeCap + data.GasTipCap = newTipCap + return data, nil + case *types.BlobTx: + var overflow bool + data.GasFeeCap, overflow = uint256.FromBig(newFeeCap) + if overflow { + return nil, fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap) + } + data.GasTipCap, overflow = uint256.FromBig(newTipCap) + if overflow { + return nil, fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap) + } + data.BlobFeeCap, overflow = uint256.FromBig(newBlobFeeCap) + if overflow { + return nil, fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap) + } + return data, nil + default: + return nil, fmt.Errorf("unexpected transaction data type %T", data) + } +} + +func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (*types.Transaction, error) { + data, err := updateTxDataGasCaps(tx.GetInner(), newFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + return nil, err + } + return types.NewTx(data), nil +} + // The mutex must be held by the caller. 
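A hypothetical call site (editorial sketch, not part of this commit) for the blob path of PostTransaction above, once the batch poster switches from its "nil, // TODO: use blobs" argument to real blobs; every identifier other than PostTransaction and blobs.EncodeBlobs is a placeholder.

    // Editorial sketch only.
    package main

    import (
        "context"
        "time"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"

        "github.com/offchainlabs/nitro/arbnode/dataposter"
        "github.com/offchainlabs/nitro/util/blobs"
    )

    func postBlobBatch(ctx context.Context, p *dataposter.DataPoster, nonce uint64, meta []byte, inboxAddr common.Address, calldata []byte, gasLimit uint64, batchData []byte) (*types.Transaction, error) {
        kzgBlobs, err := blobs.EncodeBlobs(batchData)
        if err != nil {
            return nil, err
        }
        return p.PostTransaction(
            ctx,
            time.Now(), // dataCreatedAt
            nonce,
            meta,
            inboxAddr, // sequencer inbox address (placeholder)
            calldata,  // 40-byte header + 0x50 flag + versioned hashes
            gasLimit,
            common.Big0, // no callvalue
            kzgBlobs,    // non-empty blobs select the BlobTx path above
            nil,         // access list
        )
    }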
func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Nonce, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), len(prevTx.FullTx.BlobHashes()), prevTx.FullTx.GasFeeCap(), prevTx.FullTx.GasTipCap(), prevTx.Created, backlogOfBatches) if err != nil { return err } - minNewFeeCap := arbmath.BigMulByBips(prevTx.Data.GasFeeCap, minRbfIncrease) + minNewFeeCap := arbmath.BigMulByBips(prevTx.FullTx.GasFeeCap(), minRbfIncrease) newTx := *prevTx + // TODO: also look at the blob fee cap if newFeeCap.Cmp(minNewFeeCap) < 0 { log.Debug( "no need to replace by fee transaction", - "nonce", prevTx.Data.Nonce, - "lastFeeCap", prevTx.Data.GasFeeCap, + "nonce", prevTx.FullTx.Nonce(), + "lastFeeCap", prevTx.FullTx.GasFeeCap(), "recommendedFeeCap", newFeeCap, - "lastTipCap", prevTx.Data.GasTipCap, + "lastTipCap", prevTx.FullTx.GasTipCap(), "recommendedTipCap", newTipCap, ) newTx.NextReplacement = time.Now().Add(time.Minute) @@ -676,9 +776,13 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa break } newTx.Sent = false - newTx.Data.GasFeeCap = newFeeCap - newTx.Data.GasTipCap = newTipCap - newTx.FullTx, err = p.signer(ctx, p.Sender(), types.NewTx(&newTx.Data)) + newTx.DeprecatedData.GasFeeCap = newFeeCap + newTx.DeprecatedData.GasTipCap = newTipCap + unsignedTx, err := updateGasCaps(newTx.FullTx, newFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + return err + } + newTx.FullTx, err = p.signer(ctx, p.Sender(), unsignedTx) if err != nil { return err } @@ -750,7 +854,7 @@ func (p *DataPoster) updateBalance(ctx context.Context) error { const maxConsecutiveIntermittentErrors = 10 func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg string) { - nonce := tx.Data.Nonce + nonce := tx.FullTx.Nonce() if err == nil { delete(p.errorCount, nonce) return @@ -764,7 +868,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap, "gas", tx.Data.Gas) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "gas", tx.FullTx.Gas()) } const minWait = time.Second * 10 diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index a9e78fcc58..9586b9c9a9 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -27,7 +27,7 @@ var ( type QueuedTransaction struct { FullTx *types.Transaction - Data types.DynamicFeeTx + DeprecatedData types.DynamicFeeTx // FullTx should be used instead Meta []byte Sent bool Created time.Time // may be earlier than the tx was given to the tx poster @@ -46,7 +46,7 @@ type queuedTransactionForEncoding struct { func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { return rlp.Encode(w, queuedTransactionForEncoding{ FullTx: qt.FullTx, - Data: qt.Data, + Data: qt.DeprecatedData, Meta: qt.Meta, Sent: qt.Sent, Created: (RlpTime)(qt.Created), @@ -60,7 +60,7 @@ func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { return err } qt.FullTx = qtEnc.FullTx - qt.Data = qtEnc.Data + qt.DeprecatedData = qtEnc.Data qt.Meta = qtEnc.Meta qt.Sent = qtEnc.Sent qt.Created = 
time.Time(qtEnc.Created) @@ -107,7 +107,7 @@ func LegacyToQueuedTransaction(legacyQT *LegacyQueuedTransaction) (*QueuedTransa } return &QueuedTransaction{ FullTx: legacyQT.FullTx, - Data: legacyQT.Data, + DeprecatedData: legacyQT.Data, Meta: meta, Sent: legacyQT.Sent, Created: legacyQT.Created, @@ -127,7 +127,7 @@ func QueuedTransactionToLegacy(qt *QueuedTransaction) (*LegacyQueuedTransaction, } return &LegacyQueuedTransaction{ FullTx: qt.FullTx, - Data: qt.Data, + Data: qt.DeprecatedData, Meta: meta, Sent: qt.Sent, Created: qt.Created, diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index cf9918941e..f98c120f38 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -84,7 +84,7 @@ func valueOf(t *testing.T, i int) *storage.QueuedTransaction { big.NewInt(int64(i)), []byte{byte(i)}), Meta: meta, - Data: types.DynamicFeeTx{ + DeprecatedData: types.DynamicFeeTx{ ChainID: big.NewInt(int64(i)), Nonce: uint64(i), GasTipCap: big.NewInt(int64(i)), diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 774e9ab407..deed7942ab 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -10,7 +10,6 @@ import ( "math/big" "strings" "sync/atomic" - "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -177,7 +176,7 @@ func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) + return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error { @@ -288,7 +287,7 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value, nil) + arbTx, err := v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), txData, gas, auth.Value) if err != nil { return nil, err } @@ -338,7 +337,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) ( if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) + return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } // gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate. 
diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index 44af5e2b60..3ae305b36c 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -6,7 +6,6 @@ package validatorwallet import ( "context" "fmt" - "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -95,7 +94,7 @@ func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (* return nil, err } gas := baseTx.Gas() + w.getExtraGas() - newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value(), nil) + newTx, err := w.dataPoster.PostSimpleTransaction(ctx, nonce, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } From d3d0e0fb1270ce82f82a4a12c57b589fa18e24b7 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 00:25:54 -0700 Subject: [PATCH 022/103] Add support for 4844 batch posting --- arbnode/batch_poster.go | 138 ++++++++++++++++++++++++------ arbnode/dataposter/data_poster.go | 10 ++- arbutil/wait_for_l1.go | 2 + util/blobs/blobs.go | 28 ++++-- 4 files changed, 140 insertions(+), 38 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 01a84b1c43..2dc9bac340 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -18,15 +18,18 @@ import ( "github.com/andybalholm/brotli" "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" @@ -40,6 +43,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -48,7 +52,16 @@ import ( var ( batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + + usableBytesInBlob = big.NewInt(int64(len(kzg4844.Blob{}) * 31 / 32)) + blobTxBlobGasPerBlob = big.NewInt(params.BlobTxBlobGasPerBlob) +) + +const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" + + sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlob" // TODO: this will probably get updated to have a plural name ) type batchPosterPosition struct { @@ -119,6 +132,7 @@ type BatchPosterConfig struct { RedisUrl string `koanf:"redis-url"` RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` ParentChainWallet genericconf.WalletConfig 
`koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -166,6 +180,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the period which DASes are requested to retain the stored batches.") f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") + f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -188,6 +203,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 50_000, + Post4844Blobs: true, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -215,6 +231,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 10_000, + Post4844Blobs: true, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -753,30 +770,73 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) { return fullMsg, nil } -func (b *BatchPoster) encodeAddBatch(seqNum *big.Int, prevMsgNum arbutil.MessageIndex, newMsgNum arbutil.MessageIndex, message []byte, delayedMsg uint64) ([]byte, error) { - method, ok := b.seqInboxABI.Methods["addSequencerL2BatchFromOrigin0"] +func (b *BatchPoster) encodeAddBatch( + seqNum *big.Int, + prevMsgNum arbutil.MessageIndex, + newMsgNum arbutil.MessageIndex, + l2MessageData []byte, + delayedMsg uint64, + use4844 bool, +) ([]byte, []kzg4844.Blob, error) { + methodName := sequencerBatchPostMethodName + if use4844 { + methodName = sequencerBatchPostWithBlobsMethodName + } + method, ok := b.seqInboxABI.Methods[methodName] if !ok { - return nil, errors.New("failed to find add batch method") - } - inputData, err := method.Inputs.Pack( - seqNum, - message, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) + return nil, nil, errors.New("failed to find add batch method") + } + var calldata []byte + var kzgBlobs []kzg4844.Blob + var err error + if use4844 { + kzgBlobs, err = blobs.EncodeBlobs(l2MessageData) + if err != nil { + return nil, nil, fmt.Errorf("failed to encode blobs: %w", err) + } + // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. 
+ calldata, err = method.Inputs.Pack( + seqNum, + new(big.Int).SetUint64(delayedMsg), + b.config().gasRefunder, + new(big.Int).SetUint64(uint64(prevMsgNum)), + new(big.Int).SetUint64(uint64(newMsgNum)), + ) + } else { + calldata, err = method.Inputs.Pack( + seqNum, + l2MessageData, + new(big.Int).SetUint64(delayedMsg), + b.config().gasRefunder, + new(big.Int).SetUint64(uint64(prevMsgNum)), + new(big.Int).SetUint64(uint64(newMsgNum)), + ) + } if err != nil { - return nil, err + return nil, nil, err } - fullData := append([]byte{}, method.ID...) - fullData = append(fullData, inputData...) - return fullData, nil + fullCalldata := append([]byte{}, method.ID...) + fullCalldata = append(fullCalldata, calldata...) + return fullCalldata, kzgBlobs, nil } var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") -func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realNonce uint64, realAccessList types.AccessList) (uint64, error) { +type estimateGasParams struct { + From common.Address `json:"from"` + To *common.Address `json:"to"` + Data []byte `json:"data"` + AccessList types.AccessList `json:"accessList"` + BlobHashes []common.Hash `json:"blobVersionedHashes"` +} + +func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { + var gas uint64 + err := client.CallContext(ctx, &gas, "eth_estimateGas", params) + return gas, err +} + +func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { config := b.config() useNormalEstimation := b.dataPoster.MaxMempoolTransactions() == 1 if !useNormalEstimation { @@ -787,12 +847,18 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, } useNormalEstimation = latestNonce == realNonce } + rawRpcClient := b.l1Reader.Client().Client() if useNormalEstimation { + _, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs) + if err != nil { + return 0, fmt.Errorf("failed to compute real blob commitments: %w", err) + } // If we're at the latest nonce, we can skip the special future tx estimate stuff - gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ + gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ From: b.dataPoster.Sender(), To: &b.seqInboxAddr, Data: realData, + BlobHashes: realBlobHashes, AccessList: realAccessList, }) if err != nil { @@ -805,14 +871,19 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, // However, we set nextMsgNum to 1 because it is necessary for a correct estimation for the final to be non-zero. // Because we're likely estimating against older state, this might not be the actual next message, // but the gas used should be the same. 
- data, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages) + data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0) if err != nil { return 0, err } - gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: data, + _, blobHashes, err := blobs.ComputeCommitmentsAndHashes(kzgBlobs) + if err != nil { + return 0, fmt.Errorf("failed to compute blob commitments: %w", err) + } + gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: data, + BlobHashes: blobHashes, // This isn't perfect because we're probably estimating the batch at a different sequence number, // but it should overestimate rather than underestimate which is fine. AccessList: realAccessList, @@ -1039,7 +1110,20 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } - data, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg) + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + var use4844 bool + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, use4844) if err != nil { return false, err } @@ -1051,7 +1135,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // In theory, this might reduce gas usage, but only by a factor that's already // accounted for in `config.ExtraBatchGas`, as that same factor can appear if a user // posts a new delayed message that we didn't see while gas estimating. 
- gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, nonce, accessList) + gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList) if err != nil { return false, err } @@ -1071,7 +1155,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) data, gasLimit, new(big.Int), - nil, // TODO: use blobs + kzgBlobs, accessList, ) if err != nil { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 425dba8e18..ba9c278ba5 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -464,7 +464,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u } newBlobFeeCap := big.NewInt(0) if latestHeader.ExcessBlobGas != nil { - newBlobFeeCap = eip4844.CalcBlobFee(*latestHeader.ExcessBlobGas) + newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) } else if numBlobs > 0 { return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing ExcessBlobGas but blobs were specified in data poster transaction (either the parent chain node is not synced or EIP-4844 was improperly activated)", latestHeader.Number) @@ -612,9 +612,13 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim // Intentionally break out of date data poster redis clients, // so they don't try to replace by fee a tx they don't understand deprecatedData.Nonce = ^uint64(0) - commitments, proofs, blobHashes, err := blobs.ComputeCommitmentsProofsAndHashes(kzgBlobs) + commitments, blobHashes, err := blobs.ComputeCommitmentsAndHashes(kzgBlobs) if err != nil { - return nil, fmt.Errorf("failed to compute KZG metadata: %w", err) + return nil, fmt.Errorf("failed to compute KZG commitments: %w", err) + } + proofs, err := blobs.ComputeBlobProofs(kzgBlobs, commitments) + if err != nil { + return nil, fmt.Errorf("failed to compute KZG proofs: %w", err) } inner = &types.BlobTx{ Nonce: nonce, diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index b66710dbf0..9fb2cd10f8 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rpc" ) type L1Interface interface { @@ -25,6 +26,7 @@ type L1Interface interface { BlockNumber(ctx context.Context) (uint64, error) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) ChainID(ctx context.Context) (*big.Int, error) + Client() rpc.ClientInterface } func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) { diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index c8025dc253..60cc898751 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -5,6 +5,7 @@ package blobs import ( "crypto/sha256" + "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -52,20 +53,15 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { } // Return KZG commitments, proofs, and versioned hashes that corresponds to these blobs -func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) { +func ComputeCommitmentsAndHashes(blobs 
[]kzg4844.Blob) ([]kzg4844.Commitment, []common.Hash, error) { commitments := make([]kzg4844.Commitment, len(blobs)) - proofs := make([]kzg4844.Proof, len(blobs)) versionedHashes := make([]common.Hash, len(blobs)) for i := range blobs { var err error commitments[i], err = kzg4844.BlobToCommitment(blobs[i]) if err != nil { - return nil, nil, nil, err - } - proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) - if err != nil { - return nil, nil, nil, err + return nil, nil, err } // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. hash := sha256.Sum256(commitments[i][:]) @@ -73,5 +69,21 @@ func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitme versionedHashes[i] = hash } - return commitments, proofs, versionedHashes, nil + return commitments, versionedHashes, nil +} + +func ComputeBlobProofs(blobs []kzg4844.Blob, commitments []kzg4844.Commitment) ([]kzg4844.Proof, error) { + if len(blobs) != len(commitments) { + return nil, fmt.Errorf("ComputeBlobProofs got %v blobs but %v commitments", len(blobs), len(commitments)) + } + proofs := make([]kzg4844.Proof, len(blobs)) + for i := range blobs { + var err error + proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) + if err != nil { + return nil, err + } + } + + return proofs, nil } From d229f3c306fb89f5c7193ebbbeb2b8aa165433ce Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 00:30:51 -0700 Subject: [PATCH 023/103] Use updated contracts --- arbnode/batch_poster.go | 2 +- contracts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 2dc9bac340..87170caa8a 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -61,7 +61,7 @@ const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" - sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlob" // TODO: this will probably get updated to have a plural name + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" ) type batchPosterPosition struct { diff --git a/contracts b/contracts index a8e7709bfc..00d4d62578 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit a8e7709bfc918f9b8e2888d47f2fd8454779fd11 +Subproject commit 00d4d6257835ba58bb381ce8d884a819d7ce9448 From 4bcb5ec449e6a5d286e7bb6f7ee7739aa8674bef Mon Sep 17 00:00:00 2001 From: amsanghi Date: Tue, 23 Jan 2024 16:29:18 +0530 Subject: [PATCH 024/103] Fix --- arbnode/transaction_streamer.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 5491cbdbf2..f7ccae678c 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -970,14 +970,14 @@ func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec execution } var msgForPrefetch *arbostypes.MessageWithMetadata if pos+1 < msgCount { - msgForPrefetch, err = s.GetMessage(pos + 1) + msg, err := s.GetMessage(pos + 1) if err != nil { log.Error("feedOneMsg failed to readMessage", "err", err, "pos", pos+1) return false } + msgForPrefetch = msg } - err = s.exec.DigestMessage(pos, msg, msgForPrefetch) - if err != nil { + if err = s.exec.DigestMessage(pos, msg, msgForPrefetch); err != nil { logger := log.Warn if prevMessageCount < msgCount { logger = log.Debug From 2694d8e70f5b0c932fcaabe9b9a5b3bff352dff5 Mon Sep 17 
00:00:00 2001 From: amsanghi Date: Tue, 23 Jan 2024 17:00:18 +0530 Subject: [PATCH 025/103] Add comments --- execution/gethexec/executionengine.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index d376c59ba2..e941faf9f9 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -502,6 +502,11 @@ func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.Mess return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos))) } +// DigestMessage is used to create a block by executing msg against the latest state and storing it. +// Also, while creating a block by executing msg against the latest state, +// in parallel, creates a block by executing msgForPrefetch (msg+1) against the latest state +// but does not store the block. +// This helps in filling the cache, so that the next block creation is faster. func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { if !s.createBlocksMutex.TryLock() { return errors.New("createBlock mutex held") From eba35e5dca4dbddfec6759d2a68fc2df91fcaa02 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 12:53:25 -0700 Subject: [PATCH 026/103] Fix bit checks --- arbstate/das_reader.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 5f568605b1..46d01b7bb1 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -46,20 +46,24 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5 // BrotliMessageHeaderByte indicates that the message is brotli-compressed. 
const BrotliMessageHeaderByte byte = 0 +func hasBits(checking byte, bits byte) bool { + return (checking & bits) == bits +} + func IsDASMessageHeaderByte(header byte) bool { - return (DASMessageHeaderFlag & header) > 0 + return hasBits(header, DASMessageHeaderFlag) } func IsTreeDASMessageHeaderByte(header byte) bool { - return (TreeDASMessageHeaderFlag & header) > 0 + return hasBits(header, TreeDASMessageHeaderFlag) } func IsZeroheavyEncodedHeaderByte(header byte) bool { - return (ZeroheavyMessageHeaderFlag & header) > 0 + return hasBits(header, ZeroheavyMessageHeaderFlag) } func IsBlobHashesHeaderByte(header byte) bool { - return (BlobHashesHeaderFlag & header) > 0 + return hasBits(header, BlobHashesHeaderFlag) } func IsBrotliMessageHeaderByte(b uint8) bool { From 863911649278001d7324e4a000be0f02e871fbb0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 12:26:19 -0700 Subject: [PATCH 027/103] Fix Data field type in estimateGasParams --- arbnode/batch_poster.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 87170caa8a..b4bf4c807b 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -825,7 +826,7 @@ var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") type estimateGasParams struct { From common.Address `json:"from"` To *common.Address `json:"to"` - Data []byte `json:"data"` + Data hexutil.Bytes `json:"data"` AccessList types.AccessList `json:"accessList"` BlobHashes []common.Hash `json:"blobVersionedHashes"` } From 5cdcde12efbce6fe425df4bdebd7cef842135892 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 16:14:32 -0700 Subject: [PATCH 028/103] Fix raw estimateGas in batch poster --- arbnode/batch_poster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index b4bf4c807b..28c248043a 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -832,9 +832,9 @@ type estimateGasParams struct { } func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { - var gas uint64 + var gas hexutil.Uint64 err := client.CallContext(ctx, &gas, "eth_estimateGas", params) - return gas, err + return uint64(gas), err } func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { From 54ee5c31ffdba4119a124dcd9bce09813f670e52 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 16:14:49 -0700 Subject: [PATCH 029/103] Misc refactors while I was tracking down the previous bug --- arbnode/batch_poster.go | 11 ++++++----- arbnode/dataposter/data_poster.go | 19 ++++++++++--------- system_tests/batch_poster_test.go | 2 +- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 28c248043a..53d3e7f403 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -828,7 +828,7 @@ type estimateGasParams struct { To *common.Address `json:"to"` 
Data hexutil.Bytes `json:"data"` AccessList types.AccessList `json:"accessList"` - BlobHashes []common.Hash `json:"blobVersionedHashes"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` } func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { @@ -1164,12 +1164,13 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } log.Info( "BatchPoster: batch sent", - "sequence nr.", batchPosition.NextSeqNum, + "sequenceNumber", batchPosition.NextSeqNum, "from", batchPosition.MessageCount, "to", b.building.msgCount, - "prev delayed", batchPosition.DelayedMessageCount, - "current delayed", b.building.segments.delayedMsg, - "total segments", len(b.building.segments.rawSegments), + "prevDelayed", batchPosition.DelayedMessageCount, + "currentDelayed", b.building.segments.delayedMsg, + "totalSegments", len(b.building.segments.rawSegments), + "numBlobs", len(kzgBlobs), ) recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index ba9c278ba5..4f3f514d11 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -636,7 +636,7 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim ChainID: p.parentChainID256, } // reuse the code to convert gas fee and tip caps to uint256s - inner, err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap) + err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap) if err != nil { return nil, err } @@ -714,34 +714,35 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return p.saveTx(ctx, newTx, &newerTx) } -func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (types.TxData, error) { +func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) error { switch data := data.(type) { case *types.DynamicFeeTx: data.GasFeeCap = newFeeCap data.GasTipCap = newTipCap - return data, nil + return nil case *types.BlobTx: var overflow bool data.GasFeeCap, overflow = uint256.FromBig(newFeeCap) if overflow { - return nil, fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap) + return fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap) } data.GasTipCap, overflow = uint256.FromBig(newTipCap) if overflow { - return nil, fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap) + return fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap) } data.BlobFeeCap, overflow = uint256.FromBig(newBlobFeeCap) if overflow { - return nil, fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap) + return fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap) } - return data, nil + return nil default: - return nil, fmt.Errorf("unexpected transaction data type %T", data) + return fmt.Errorf("unexpected transaction data type %T", data) } } func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (*types.Transaction, error) { - data, err := updateTxDataGasCaps(tx.GetInner(), newFeeCap, newTipCap, newBlobFeeCap) + data := tx.GetInner() + err := updateTxDataGasCaps(data, newFeeCap, newTipCap, newBlobFeeCap) if err != nil { return nil, err } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index f7bf74f699..cacbe3cee4 100644 --- a/system_tests/batch_poster_test.go +++ 
b/system_tests/batch_poster_test.go @@ -180,7 +180,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } lastTxHash := txs[len(txs)-1].Hash() - for i := 90; i > 0; i-- { + for i := 90; i >= 0; i-- { builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) From 4925e63acaafc651c64302062f976cd9d4c754fe Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 16:23:23 -0700 Subject: [PATCH 030/103] Add option to force posting 4844 blobs --- arbnode/batch_poster.go | 20 ++++++++++++++------ arbnode/dataposter/data_poster.go | 8 ++++++-- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 53d3e7f403..4a07d36521 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -134,6 +134,7 @@ type BatchPosterConfig struct { RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` + ForcePost4844Blobs bool `koanf:"force-post-4844-blobs" reload:"hot"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -182,6 +183,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") + f.Bool(prefix+".force-post-4844-blobs", DefaultBatchPosterConfig.ForcePost4844Blobs, "if the parent chain supports 4844 blobs and post-4844-blobs is true, post 4844 blobs even if it's not price efficient") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -205,6 +207,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 50_000, Post4844Blobs: true, + ForcePost4844Blobs: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -233,6 +236,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 10_000, Post4844Blobs: true, + ForcePost4844Blobs: false, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -1116,13 +1120,17 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, err } var use4844 bool - if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil { - blobFeePerByte := 
eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.ForcePost4844Blobs { + use4844 = true + } else { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } } data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, use4844) if err != nil { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 4f3f514d11..1415f78140 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -463,11 +463,15 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } newBlobFeeCap := big.NewInt(0) - if latestHeader.ExcessBlobGas != nil { + if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) } else if numBlobs > 0 { - return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing ExcessBlobGas but blobs were specified in data poster transaction (either the parent chain node is not synced or EIP-4844 was improperly activated)", latestHeader.Number) + return nil, nil, nil, fmt.Errorf( + "latest parent chain block %v missing ExcessBlobGas or BlobGasUsed but blobs were specified in data poster transaction "+ + "(either the parent chain node is not synced or the EIP-4844 was improperly activated)", + latestHeader.Number, + ) } softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) softConfNonce, err := p.client.NonceAt(ctx, p.Sender(), softConfBlock) From b6b54ef82b6f803fc5998777ffc138c23a4faa59 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 17:35:49 -0700 Subject: [PATCH 031/103] Use custom beacon request helper --- arbnode/blob_reader.go | 158 ++++++++++++++++++----------------- arbnode/node.go | 8 +- util/beaconclient/client.go | 98 ---------------------- util/beaconclient/errors.go | 40 --------- util/beaconclient/options.go | 48 ----------- util/blobs/blobs.go | 12 ++- 6 files changed, 90 insertions(+), 274 deletions(-) delete mode 100644 util/beaconclient/client.go delete mode 100644 util/beaconclient/errors.go delete mode 100644 util/beaconclient/options.go diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index 673df37b1f..d7560f47e4 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -5,24 +5,26 @@ package arbnode import ( "context" - "crypto/sha256" "encoding/json" "fmt" + "io" + "net/http" + "path" 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/util/beaconclient" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/pretty" - "github.com/pkg/errors" "github.com/spf13/pflag" ) type BlobClient struct { - bc *beaconclient.Client - ec arbutil.L1Interface + config BlobClientConfig + ec arbutil.L1Interface + httpClient *http.Client // The genesis time time won't change so only request it once. cachedGenesisTime uint64 @@ -40,8 +42,45 @@ func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") } -func NewBlobClient(bc *beaconclient.Client, ec arbutil.L1Interface) *BlobClient { - return &BlobClient{bc: bc, ec: ec} +func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) *BlobClient { + return &BlobClient{ + config: config, + ec: ec, + httpClient: &http.Client{}, + } +} + +type fullResult[T any] struct { + Data T `json:"data"` +} + +func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath string) (T, error) { + // Unfortunately, methods on a struct can't be generic. + + var empty T + + req, err := http.NewRequestWithContext(ctx, "GET", path.Join(b.config.BeaconChainUrl, beaconPath), http.NoBody) + if err != nil { + return empty, err + } + + resp, err := b.httpClient.Do(req) + if err != nil { + return empty, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return empty, err + } + + var full fullResult[T] + if err := json.Unmarshal(body, &full); err != nil { + return empty, err + } + + return full.Data, nil } // Get all the blobs associated with a particular block. 
@@ -62,58 +101,48 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio return b.blobSidecars(ctx, slot, versionedHashes) } -type blobResponse struct { - Data []blobResponseItem `json:"data"` -} type blobResponseItem struct { - BlockRoot string `json:"block_root"` - Index int `json:"index"` - Slot uint64 `json:"slot"` - BlockParentRoot string `json:"block_parent_root"` - ProposerIndex uint64 `json:"proposer_index"` - Blob string `json:"blob"` - KzgCommitment string `json:"kzg_commitment"` - KzgProof string `json:"kzg_proof"` + BlockRoot string `json:"block_root"` + Index int `json:"index"` + Slot uint64 `json:"slot"` + BlockParentRoot string `json:"block_parent_root"` + ProposerIndex uint64 `json:"proposer_index"` + Blob hexutil.Bytes `json:"blob"` + KzgCommitment hexutil.Bytes `json:"kzg_commitment"` + KzgProof hexutil.Bytes `json:"kzg_proof"` } func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { - body, err := b.bc.Get(ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + response, err := beaconRequest[[]blobResponseItem](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil { - return nil, errors.Wrap(err, "error calling beacon client in blobSidecars") + return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) } - br := &blobResponse{} - err = json.Unmarshal(body, br) - if err != nil { - return nil, errors.Wrap(err, "error decoding json response in blobSidecars") - } - - if len(br.Data) == 0 { - return nil, fmt.Errorf("no blobs found for slot %d", slot) + if len(response) < len(versionedHashes) { + return nil, fmt.Errorf("expected at least %d blobs for slot %d but only got %d", len(versionedHashes), slot, len(response)) } - blobs := make([]kzg4844.Blob, len(versionedHashes)) - var totalFound int + output := make([]kzg4844.Blob, len(versionedHashes)) + outputsFound := make([]bool, len(versionedHashes)) - for i := range blobs { - commitmentBytes, err := hexutil.Decode(br.Data[i].KzgCommitment) - if err != nil { - return nil, fmt.Errorf("couldn't decode commitment for slot(%d) at index(%d), commitment(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgCommitment)) - } + for _, blobItem := range response { var commitment kzg4844.Commitment - copy(commitment[:], commitmentBytes) - versionedHash := kZGToVersionedHash(commitment) + copy(commitment[:], blobItem.KzgCommitment) + versionedHash := blobs.CommitmentToVersionedHash(commitment) // The versioned hashes of the blob commitments are produced in the by HASH_OPCODE_BYTE, // presumably in the order they were added to the tx. The spec is unclear if the blobs // need to be returned in any particular order from the beacon API, so we put them back in // the order from the tx. 
- var j int + var outputIdx int var found bool - for j = range versionedHashes { - if versionedHashes[j] == versionedHash { + for outputIdx = range versionedHashes { + if versionedHashes[outputIdx] == versionedHash { found = true - totalFound++ + if outputsFound[outputIdx] { + return nil, fmt.Errorf("found blob with versioned hash %v twice", versionedHash) + } + outputsFound[outputIdx] = true break } } @@ -121,30 +150,24 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas continue } - blob, err := hexutil.Decode(br.Data[i].Blob) - if err != nil { - return nil, fmt.Errorf("couldn't decode blob for slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) - } - copy(blobs[j][:], blob) + copy(output[outputIdx][:], blobItem.Blob) - proofBytes, err := hexutil.Decode(br.Data[i].KzgProof) - if err != nil { - return nil, fmt.Errorf("couldn't decode proof for slot(%d) at index(%d), proof(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgProof)) - } var proof kzg4844.Proof - copy(proof[:], proofBytes) + copy(proof[:], blobItem.KzgProof) - err = kzg4844.VerifyBlobProof(blobs[j], commitment, proof) + err = kzg4844.VerifyBlobProof(output[outputIdx], commitment, proof) if err != nil { - return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) + return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, blobItem.Index, pretty.FirstFewChars(blobItem.Blob.String())) } } - if totalFound < len(versionedHashes) { - return nil, fmt.Errorf("not all of the requested blobs (%d/%d) were found at slot (%d), can't reconstruct batch payload", totalFound, len(versionedHashes), slot) + for i, found := range outputsFound { + if !found { + return nil, fmt.Errorf("missing blob %v in slot %v, can't reconstruct batch payload", versionedHashes[i], slot) + } } - return blobs, nil + return output, nil } type genesisResponse struct { @@ -157,29 +180,10 @@ func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { return b.cachedGenesisTime, nil } - body, err := b.bc.Get(ctx, "/eth/v1/beacon/genesis") + gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") if err != nil { - return 0, errors.Wrap(err, "error calling beacon client in genesisTime") - } - - gr := &genesisResponse{} - dataWrapper := &struct{ Data *genesisResponse }{Data: gr} - err = json.Unmarshal(body, dataWrapper) - if err != nil { - return 0, errors.Wrap(err, "error decoding json response in genesisTime") + return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) } return gr.GenesisTime, nil } - -// The following code is taken from core/vm/contracts.go -const ( - blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile. 
-) - -func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { - h := sha256.Sum256(kzg[:]) - h[0] = blobCommitmentVersionKZG - - return h -} diff --git a/arbnode/node.go b/arbnode/node.go index 99ecb541ee..9f5626bbaf 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -40,7 +40,6 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" - "github.com/offchainlabs/nitro/util/beaconclient" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" @@ -518,12 +517,7 @@ func createNodeImpl( var blobReader arbstate.BlobReader if config.BlobClient.BeaconChainUrl != "" { - bc, err := beaconclient.NewClient(config.BlobClient.BeaconChainUrl) - if err != nil { - return nil, err - } - - blobReader = NewBlobClient(bc, l1client) + blobReader = NewBlobClient(config.BlobClient, l1client) } inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) diff --git a/util/beaconclient/client.go b/util/beaconclient/client.go deleted file mode 100644 index e2dfd8e6bf..0000000000 --- a/util/beaconclient/client.go +++ /dev/null @@ -1,98 +0,0 @@ -package beaconclient - -import ( - "context" - "io" - "net" - "net/http" - "net/url" - - "github.com/pkg/errors" -) - -// Client is a wrapper object around the HTTP client. -// Taken from prysm/api/client. -type Client struct { - hc *http.Client - baseURL *url.URL - token string -} - -// NewClient constructs a new client with the provided options (ex WithTimeout). -// `host` is the base host + port used to construct request urls. This value can be -// a URL string, or NewClient will assume an http endpoint if just `host:port` is used. -func NewClient(host string, opts ...ClientOpt) (*Client, error) { - u, err := urlForHost(host) - if err != nil { - return nil, err - } - c := &Client{ - hc: &http.Client{}, - baseURL: u, - } - for _, o := range opts { - o(c) - } - return c, nil -} - -// Token returns the bearer token used for jwt authentication -func (c *Client) Token() string { - return c.token -} - -// BaseURL returns the base url of the client -func (c *Client) BaseURL() *url.URL { - return c.baseURL -} - -// Do execute the request against the http client -func (c *Client) Do(req *http.Request) (*http.Response, error) { - return c.hc.Do(req) -} - -func urlForHost(h string) (*url.URL, error) { - // try to parse as url (being permissive) - u, err := url.Parse(h) - if err == nil && u.Host != "" { - return u, nil - } - // try to parse as host:port - host, port, err := net.SplitHostPort(h) - if err != nil { - return nil, ErrMalformedHostname - } - return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil -} - -// NodeURL returns a human-readable string representation of the beacon node base url. -func (c *Client) NodeURL() string { - return c.baseURL.String() -} - -// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package. 
-func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) { - u := c.baseURL.ResolveReference(&url.URL{Path: path}) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - for _, o := range opts { - o(req) - } - r, err := c.hc.Do(req) - if err != nil { - return nil, err - } - defer func() { - err = r.Body.Close() - }() - if r.StatusCode != http.StatusOK { - return nil, Non200Err(r) - } - b, err := io.ReadAll(r.Body) - if err != nil { - return nil, errors.Wrap(err, "error reading http response body") - } - return b, nil -} diff --git a/util/beaconclient/errors.go b/util/beaconclient/errors.go deleted file mode 100644 index 7ee88805cd..0000000000 --- a/util/beaconclient/errors.go +++ /dev/null @@ -1,40 +0,0 @@ -package beaconclient - -import ( - "fmt" - "io" - "net/http" - - "github.com/pkg/errors" -) - -// ErrMalformedHostname is used to indicate if a host name's format is incorrect. -var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500") - -// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code. -// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK. -var ErrNotOK = errors.New("did not receive 2xx response from API") - -// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API. -var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API") - -// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized. -var ErrInvalidNodeVersion = errors.New("invalid node version response") - -// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error. -func Non200Err(response *http.Response) error { - bodyBytes, err := io.ReadAll(response.Body) - var body string - if err != nil { - body = "(Unable to read response body.)" - } else { - body = "response body:\n" + string(bodyBytes) - } - msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body) - switch response.StatusCode { - case 404: - return errors.Wrap(ErrNotFound, msg) - default: - return errors.Wrap(ErrNotOK, msg) - } -} diff --git a/util/beaconclient/options.go b/util/beaconclient/options.go deleted file mode 100644 index 98a37e17a0..0000000000 --- a/util/beaconclient/options.go +++ /dev/null @@ -1,48 +0,0 @@ -package beaconclient - -import ( - "fmt" - "net/http" - "time" -) - -// ReqOption is a request functional option. -type ReqOption func(*http.Request) - -// WithSSZEncoding is a request functional option that adds SSZ encoding header. -func WithSSZEncoding() ReqOption { - return func(req *http.Request) { - req.Header.Set("Accept", "application/octet-stream") - } -} - -// WithAuthorizationToken is a request functional option that adds header for authorization token. -func WithAuthorizationToken(token string) ReqOption { - return func(req *http.Request) { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - } -} - -// ClientOpt is a functional option for the Client type (http.Client wrapper) -type ClientOpt func(*Client) - -// WithTimeout sets the .Timeout attribute of the wrapped http.Client. 
-func WithTimeout(timeout time.Duration) ClientOpt { - return func(c *Client) { - c.hc.Timeout = timeout - } -} - -// WithRoundTripper replaces the underlying HTTP's transport with a custom one. -func WithRoundTripper(t http.RoundTripper) ClientOpt { - return func(c *Client) { - c.hc.Transport = t - } -} - -// WithAuthenticationToken sets an oauth token to be used. -func WithAuthenticationToken(token string) ClientOpt { - return func(c *Client) { - c.token = token - } -} diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index c8025dc253..9f6c8d1303 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -51,6 +51,13 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { return outputData, err } +func CommitmentToVersionedHash(commitment kzg4844.Commitment) common.Hash { + // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. + hash := sha256.Sum256(commitment[:]) + hash[0] = 1 + return hash +} + // Return KZG commitments, proofs, and versioned hashes that corresponds to these blobs func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) { commitments := make([]kzg4844.Commitment, len(blobs)) @@ -67,10 +74,7 @@ func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitme if err != nil { return nil, nil, nil, err } - // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. - hash := sha256.Sum256(commitments[i][:]) - hash[0] = 1 - versionedHashes[i] = hash + versionedHashes[i] = CommitmentToVersionedHash(commitments[i]) } return commitments, proofs, versionedHashes, nil From 2ba1c490c3fb0fef695ade891e102da167350c0e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 18:14:17 -0700 Subject: [PATCH 032/103] Update contracts to support zero basefee for gas estimation --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 00d4d62578..e253b8b1b5 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 00d4d6257835ba58bb381ce8d884a819d7ce9448 +Subproject commit e253b8b1b5865f135ac63ea3d3cea1bfe8ef2ad7 From 2bd34c65b3d5fba594690dba15be296c27b80454 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 23 Jan 2024 17:25:51 -0800 Subject: [PATCH 033/103] Fetch seconds per slot from beacon endpoint --- arbnode/blob_reader.go | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index d7560f47e4..02626efff5 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -26,8 +26,9 @@ type BlobClient struct { ec arbutil.L1Interface httpClient *http.Client - // The genesis time time won't change so only request it once. - cachedGenesisTime uint64 + // The genesis time time and seconds per slot won't change so only request them once. 
+ cachedGenesisTime uint64 + cachedSecondsPerSlot uint64 } type BlobClientConfig struct { @@ -89,15 +90,15 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio if err != nil { return nil, err } - genesisTime, err := b.genesisTime(ctx) if err != nil { return nil, err } - - // TODO make denominator configurable for devnets with faster block time - slot := (header.Time - genesisTime) / 12 - + secondsPerSlot, err := b.secondsPerSlot(ctx) + if err != nil { + return nil, err + } + slot := (header.Time - genesisTime) / secondsPerSlot return b.blobSidecars(ctx, slot, versionedHashes) } @@ -179,11 +180,27 @@ func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { if b.cachedGenesisTime > 0 { return b.cachedGenesisTime, nil } - gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") if err != nil { return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) } + b.cachedGenesisTime = gr.GenesisTime + return b.cachedGenesisTime, nil +} + +type getSpecResponse struct { + SecondsPerSlot uint64 `json:"SECONDS_PER_SLOT"` +} + +func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { + if b.cachedSecondsPerSlot > 0 { + return b.cachedSecondsPerSlot, nil + } + gr, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") + if err != nil { + return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) + } + b.cachedSecondsPerSlot = gr.SecondsPerSlot + return b.cachedSecondsPerSlot, nil - return gr.GenesisTime, nil } From fe9fce1c995734b2fcedb4c0d8658a578156a43e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 18:55:45 -0700 Subject: [PATCH 034/103] Update go-ethereum pin to fix trusted setup --- go-ethereum | 2 +- go.mod | 6 +++--- go.sum | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go-ethereum b/go-ethereum index c4b3aa68a0..eca6e11dad 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit c4b3aa68a05f468e0c30147f9383bfc76d82388f +Subproject commit eca6e11dad2c7f8cd1276e38678afec271323422 diff --git a/go.mod b/go.mod index e38f3209dd..d50090f6c1 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,6 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.12.1 github.com/multiformats/go-multihash v0.2.3 - github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 @@ -90,7 +89,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect @@ -103,7 +102,7 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -234,6 +233,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect 
github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect diff --git a/go.sum b/go.sum index 872afcafbf..d066f85214 100644 --- a/go.sum +++ b/go.sum @@ -264,8 +264,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= -github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -338,8 +338,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= -github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= From ac0978f6ad0758be8ae435d1c2fdafb4e68e41a4 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:14:06 -0700 Subject: [PATCH 035/103] Support batch data location for batch hashes --- arbnode/sequencer_inbox.go | 15 +++++++++++++++ arbutil/transaction_data.go | 12 ++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index d0cdebfeff..b743bf0ef9 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -33,6 +34,7 @@ const ( batchDataTxInput batchDataLocation = iota batchDataSeparateEvent batchDataNone 
+ batchDataBlobHashes ) func init() { @@ -149,6 +151,19 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut case batchDataNone: // No data when in a force inclusion batch return nil, nil + case batchDataBlobHashes: + tx, err := arbutil.GetLogTransaction(ctx, client, m.rawLog) + if err != nil { + return nil, err + } + if len(tx.BlobHashes()) == 0 { + return nil, fmt.Errorf("blob batch transaction %v has no blobs", tx.Hash()) + } + data := []byte{arbstate.BlobHashesHeaderFlag} + for _, h := range tx.BlobHashes() { + data = append(data, h[:]...) + } + return data, nil default: return nil, fmt.Errorf("batch has invalid data location %v", m.dataLocation) } diff --git a/arbutil/transaction_data.go b/arbutil/transaction_data.go index 7741af6e9b..8270a628bd 100644 --- a/arbutil/transaction_data.go +++ b/arbutil/transaction_data.go @@ -10,8 +10,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long -func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { +func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) (*types.Transaction, error) { tx, err := client.TransactionInBlock(ctx, log.BlockHash, log.TxIndex) if err != nil { return nil, err @@ -19,6 +18,15 @@ func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) if tx.Hash() != log.TxHash { return nil, fmt.Errorf("L1 client returned unexpected transaction hash %v when looking up block %v transaction %v with expected hash %v", tx.Hash(), log.BlockHash, log.TxIndex, log.TxHash) } + return tx, nil +} + +// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long +func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { + tx, err := GetLogTransaction(ctx, client, log) + if err != nil { + return nil, err + } if len(tx.Data()) < 4 { return nil, fmt.Errorf("log emitting transaction %v unexpectedly does not have enough data", tx.Hash()) } From 8ca7cf89c2c375a93c6510adfa09613bea830616 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:17:34 -0700 Subject: [PATCH 036/103] Fix beacon URL concatenation --- arbnode/blob_reader.go | 19 ++++++++++++++----- arbnode/node.go | 5 ++++- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index 02626efff5..cf7c266513 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net/http" + "net/url" "path" "github.com/ethereum/go-ethereum/common" @@ -22,8 +23,8 @@ import ( ) type BlobClient struct { - config BlobClientConfig ec arbutil.L1Interface + beaconUrl *url.URL httpClient *http.Client // The genesis time time and seconds per slot won't change so only request them once. 
@@ -43,12 +44,16 @@ func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") } -func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) *BlobClient { +func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) { + beaconUrl, err := url.Parse(config.BeaconChainUrl) + if err != nil { + return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) + } return &BlobClient{ - config: config, ec: ec, + beaconUrl: beaconUrl, httpClient: &http.Client{}, - } + }, nil } type fullResult[T any] struct { @@ -60,7 +65,11 @@ func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath var empty T - req, err := http.NewRequestWithContext(ctx, "GET", path.Join(b.config.BeaconChainUrl, beaconPath), http.NoBody) + // not really a deep copy, but copies the Path part we care about + url := *b.beaconUrl + url.Path = path.Join(url.Path, beaconPath) + + req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody) if err != nil { return empty, err } diff --git a/arbnode/node.go b/arbnode/node.go index 9f5626bbaf..5990cfdbec 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -517,7 +517,10 @@ func createNodeImpl( var blobReader arbstate.BlobReader if config.BlobClient.BeaconChainUrl != "" { - blobReader = NewBlobClient(config.BlobClient, l1client) + blobReader, err = NewBlobClient(config.BlobClient, l1client) + if err != nil { + return nil, err + } } inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) From ef832b23d559e1ff24142d3128a4bfd910e2148f Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:30:29 -0700 Subject: [PATCH 037/103] Unmarshal beacon chain JSON integers as strings --- arbnode/blob_reader.go | 25 +++++++++++++------------ util/jsonapi/uint64_string.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 12 deletions(-) create mode 100644 util/jsonapi/uint64_string.go diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index cf7c266513..1424285832 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" + "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/util/pretty" "github.com/spf13/pflag" @@ -112,14 +113,14 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio } type blobResponseItem struct { - BlockRoot string `json:"block_root"` - Index int `json:"index"` - Slot uint64 `json:"slot"` - BlockParentRoot string `json:"block_parent_root"` - ProposerIndex uint64 `json:"proposer_index"` - Blob hexutil.Bytes `json:"blob"` - KzgCommitment hexutil.Bytes `json:"kzg_commitment"` - KzgProof hexutil.Bytes `json:"kzg_proof"` + BlockRoot string `json:"block_root"` + Index jsonapi.Uint64String `json:"index"` + Slot jsonapi.Uint64String `json:"slot"` + BlockParentRoot string `json:"block_parent_root"` + ProposerIndex jsonapi.Uint64String `json:"proposer_index"` + Blob hexutil.Bytes `json:"blob"` + KzgCommitment hexutil.Bytes `json:"kzg_commitment"` + KzgProof hexutil.Bytes `json:"kzg_proof"` } func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { @@ -181,7 +182,7 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, 
versionedHas } type genesisResponse struct { - GenesisTime uint64 `json:"genesis_time"` + GenesisTime jsonapi.Uint64String `json:"genesis_time"` // don't currently care about other fields, add if needed } @@ -193,12 +194,12 @@ func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { if err != nil { return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) } - b.cachedGenesisTime = gr.GenesisTime + b.cachedGenesisTime = uint64(gr.GenesisTime) return b.cachedGenesisTime, nil } type getSpecResponse struct { - SecondsPerSlot uint64 `json:"SECONDS_PER_SLOT"` + SecondsPerSlot jsonapi.Uint64String `json:"SECONDS_PER_SLOT"` } func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { @@ -209,7 +210,7 @@ func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { if err != nil { return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) } - b.cachedSecondsPerSlot = gr.SecondsPerSlot + b.cachedSecondsPerSlot = uint64(gr.SecondsPerSlot) return b.cachedSecondsPerSlot, nil } diff --git a/util/jsonapi/uint64_string.go b/util/jsonapi/uint64_string.go new file mode 100644 index 0000000000..27cbb18526 --- /dev/null +++ b/util/jsonapi/uint64_string.go @@ -0,0 +1,32 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package jsonapi + +import ( + "fmt" + "strconv" +) + +// Uint64String is a uint64 that JSON marshals and unmarshals as string in decimal +type Uint64String uint64 + +func (u *Uint64String) UnmarshalJSON(b []byte) error { + s := string(b) + if s == "null" { + return nil + } + + // Parse string as uint64, removing quotes + value, err := strconv.ParseUint(s[1:len(s)-1], 10, 64) + if err != nil { + return err + } + + *u = Uint64String(value) + return nil +} + +func (u Uint64String) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("\"%d\"", uint64(u))), nil +} From 13ed4c63a4e8822d93354171e74b43e89bd7646b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:30:41 -0700 Subject: [PATCH 038/103] Fix blob decoding not accepting trailing bytes --- util/blobs/blobs.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 9f6c8d1303..02a22556a7 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -4,6 +4,7 @@ package blobs import ( + "bytes" "crypto/sha256" "github.com/ethereum/go-ethereum/common" @@ -47,7 +48,7 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { } } var outputData []byte - err := rlp.DecodeBytes(rlpData, &outputData) + err := rlp.Decode(bytes.NewReader(rlpData), &outputData) return outputData, err } From b48224390ae90975a7d249fe2cc2fcc266fd09d3 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:54:09 -0700 Subject: [PATCH 039/103] Fix arbitrator tests and hopefully CI --- .github/workflows/codeql-analysis.yml | 3 +++ .../prover/test-cases/rust/data/msg0.bin | Bin 1 -> 2 bytes .../prover/test-cases/rust/data/msg1.bin | Bin 32 -> 32 bytes .../prover/test-cases/rust/src/bin/host-io.rs | 19 +++++++++++------- contracts | 2 +- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 8fb9d80c21..8b7ebd0e15 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -75,6 +75,9 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable + - name: Install Foundry + 
uses: foundry-rs/foundry-toolchain@v1 + - name: Cache Rust Build Products uses: actions/cache@v3 with: diff --git a/arbitrator/prover/test-cases/rust/data/msg0.bin b/arbitrator/prover/test-cases/rust/data/msg0.bin index 5cd813e5c5f312673ce9cf39fb832fb2d55116cc..7eb0b7fdf9dac856acf46ccada40589a72c84592 100644 GIT binary patch literal 2 JcmZQr1pol70IL81 literal 1 IcmZ1l00651s{jB1 diff --git a/arbitrator/prover/test-cases/rust/data/msg1.bin b/arbitrator/prover/test-cases/rust/data/msg1.bin index 2ea3dec3e1641a99198fd86a817f906db6ddaf76..fefa1cc823e0ec463b7923972b03901b52808fc1 100644 GIT binary patch delta 6 NcmY#TU}Tse4*&wG0Pz3- literal 32 ncmZQ%VrF4wW9Q)H;^yJy;};MV5*85^6PJ*bl9rK`lUD!$7k~m0 diff --git a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs index d16f60ff50..679ee14486 100644 --- a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs +++ b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs @@ -29,24 +29,29 @@ fn main() { let mut bytebuffer = Bytes32([0x0; 32]); // in delayed inbox - we're skipping the "kind" byte println!("delayed inbox message 0"); + let mut expected_buffer = bytebuffer.0; let len = wavm_read_delayed_inbox_message(0, bytebuffer.0.as_mut_ptr(), DELAYED_HEADER_LEN); - assert_eq!(len, 2); - assert_eq!(bytebuffer.0[1], 0xaa); + assert_eq!(len, 3); + expected_buffer[2] = 0xaa; + assert_eq!(bytebuffer.0, expected_buffer); println!("delayed inbox message 1"); let len = wavm_read_delayed_inbox_message(1, bytebuffer.0.as_mut_ptr(), DELAYED_HEADER_LEN); assert_eq!(len, 32); - for j in 1..31 { - assert_eq!(bytebuffer.0[j], (j as u8)); + for j in 1..32 { + assert_eq!(bytebuffer.0[j], (j as u8) - 1); } println!("inbox message 0"); + expected_buffer = bytebuffer.0; let len = wavm_read_inbox_message(0, bytebuffer.0.as_mut_ptr(), INBOX_HEADER_LEN); - assert_eq!(len, 1); - assert_eq!(bytebuffer.0[0], 0xaa); + expected_buffer[0] = 0; + expected_buffer[1] = 0xaa; + assert_eq!(len, 2); + assert_eq!(bytebuffer.0, expected_buffer); println!("inbox message 1"); let len = wavm_read_inbox_message(1, bytebuffer.0.as_mut_ptr(), INBOX_HEADER_LEN); assert_eq!(len, 32); for j in 0..32 { - assert_eq!(bytebuffer.0[j], (j as u8) + 1); + assert_eq!(bytebuffer.0[j], (j as u8)); } let keccak_hash = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"); diff --git a/contracts b/contracts index e253b8b1b5..cd5093d45e 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit e253b8b1b5865f135ac63ea3d3cea1bfe8ef2ad7 +Subproject commit cd5093d45ef0353fc5b2718ead70bd7f36e1a92c From ae7a199bfd6c00b7438f94d82f93dfa97d633c8f Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:22:21 -0700 Subject: [PATCH 040/103] Use KZG preimage support in replay binary --- cmd/replay/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index b634a2d5bb..dd8a0fd1f7 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -130,9 +130,9 @@ func (r *BlobPreimageReader) GetBlobs( var blobs []kzg4844.Blob for _, h := range versionedHashes { var blob kzg4844.Blob - var preimage []byte - if true { - panic("TODO: fill in preimage with wavmio.ResolvePreimage(h, wavmio.PreimageTypeEthVersionedHash) once KZG proof support is merged into this branch") + preimage, err := wavmio.ResolveTypedPreimage(arbutil.EthVersionedHashPreimageType, h) + if err != nil { + return nil, err } if len(preimage) != len(blob) { return nil, fmt.Errorf("for blob %v got back 
preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) From 3de7b73eb70c5711acce731981d36cb26c2dffd2 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:22:48 -0700 Subject: [PATCH 041/103] Fix arbitrator handling of non-keccak preimages --- arbitrator/jit/src/wavmio.rs | 4 ++-- arbitrator/prover/src/lib.rs | 18 +++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/arbitrator/jit/src/wavmio.rs b/arbitrator/jit/src/wavmio.rs index a398cb22f5..dfc7f21779 100644 --- a/arbitrator/jit/src/wavmio.rs +++ b/arbitrator/jit/src/wavmio.rs @@ -193,8 +193,8 @@ pub fn resolve_preimage_impl( }; let offset = match u32::try_from(offset) { - Ok(offset) => offset as usize, - Err(_) => error!("bad offset {offset} in {name}"), + Ok(offset) if offset % 32 == 0 => offset as usize, + _ => error!("bad offset {offset} in {name}"), }; let len = std::cmp::min(32, preimage.len().saturating_sub(offset)); diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 8285c011df..c7610ab31f 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -19,7 +19,6 @@ use crate::machine::{argument_data_to_inbox, Machine}; use arbutil::PreimageType; use eyre::Result; use machine::{get_empty_preimage_resolver, GlobalState, MachineStatus, PreimageResolver}; -use sha3::{Digest, Keccak256}; use static_assertions::const_assert_eq; use std::{ ffi::CStr, @@ -303,13 +302,18 @@ pub unsafe extern "C" fn arbitrator_set_preimage_resolver( return None; } let data = CBytes::from_raw_parts(res.ptr, res.len as usize); - let have_hash = Keccak256::digest(&data); - if have_hash.as_slice() != *hash { - panic!( - "Resolved incorrect data for hash {}: got {}", + #[cfg(debug_assertions)] + match crate::utils::hash_preimage(&data, ty) { + Ok(have_hash) if have_hash.as_slice() == *hash => {} + Ok(got_hash) => panic!( + "Resolved incorrect data for hash {} (rehashed to {})", hash, - hex::encode(data), - ); + Bytes32(got_hash), + ), + Err(err) => panic!( + "Failed to hash preimage from resolver (expecting hash {}): {}", + hash, err, + ), } Some(data) }, From e82ac7272443055041ef286af187fb225e44d8ea Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:23:04 -0700 Subject: [PATCH 042/103] Provide blob preimages during validation --- arbnode/inbox_reader.go | 15 ++++--- arbnode/node.go | 1 + arbnode/transaction_streamer.go | 2 +- execution/gethexec/block_recorder.go | 7 +-- execution/gethexec/executionengine.go | 5 ++- execution/interface.go | 2 +- staker/block_validator.go | 30 +++++++------ staker/stateless_block_validator.go | 56 ++++++++++++++++-------- system_tests/full_challenge_impl_test.go | 4 +- system_tests/staker_test.go | 2 + validator/validation_entry.go | 5 ++- 11 files changed, 81 insertions(+), 48 deletions(-) diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 9c830e3c89..5fca3c7eea 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -14,6 +14,7 @@ import ( "sync/atomic" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" @@ -401,7 +402,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) } - return r.GetSequencerMessageBytes(ctx, batchNum) + data, _, err := r.GetSequencerMessageBytes(ctx, batchNum) + return data, err }) if err != nil { return err @@ -570,24 +572,25 @@ func (r *InboxReader) getNextBlockToRead() 
(*big.Int, error) { return msgBlock, nil } -func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, error) { +func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) { metadata, err := r.tracker.GetBatchMetadata(seqNum) if err != nil { - return nil, err + return nil, common.Hash{}, err } blockNum := arbmath.UintToBig(metadata.ParentChainBlock) seqBatches, err := r.sequencerInbox.LookupBatchesInRange(ctx, blockNum, blockNum) if err != nil { - return nil, err + return nil, common.Hash{}, err } var seenBatches []uint64 for _, batch := range seqBatches { if batch.SequenceNumber == seqNum { - return batch.Serialize(ctx, r.client) + data, err := batch.Serialize(ctx, r.client) + return data, batch.BlockHash, err } seenBatches = append(seenBatches, batch.SequenceNumber) } - return nil, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) + return nil, common.Hash{}, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) } func (r *InboxReader) GetLastReadBlockAndBatchCount() (uint64, uint64) { diff --git a/arbnode/node.go b/arbnode/node.go index 5990cfdbec..de9745f2a8 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -542,6 +542,7 @@ func createNodeImpl( exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), daReader, + blobReader, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 24ef2a7cc4..f96d51ce0e 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -820,7 +820,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return nil } -func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, error) { +func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, common.Hash, error) { return s.inboxReader.GetSequencerMessageBytes(context.TODO(), batchNum) } diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index a0f6d837e4..d7e702f3c1 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -123,13 +123,14 @@ func (r *BlockRecorder) RecordBlockCreation( var readBatchInfo []validator.BatchInfo if msg != nil { batchFetcher := func(batchNum uint64) ([]byte, error) { - data, err := r.execEngine.streamer.FetchBatch(batchNum) + data, blockHash, err := r.execEngine.streamer.FetchBatch(batchNum) if err != nil { return nil, err } readBatchInfo = append(readBatchInfo, validator.BatchInfo{ - Number: batchNum, - Data: data, + Number: batchNum, + BlockHash: blockHash, + Data: data, }) return data, nil } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 58e91a197e..20e9ca6f3b 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -449,7 +449,10 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb, s.bc, s.bc.Config(), - s.streamer.FetchBatch, + func(batchNum uint64) ([]byte, error) { + data, _, err := s.streamer.FetchBatch(batchNum) + return data, err + }, ) return block, statedb, receipts, err diff --git a/execution/interface.go b/execution/interface.go index ef9409b9c1..5f7c01719e 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -73,7 +73,7 @@ type 
FullExecutionClient interface { // not implemented in execution, used as input type BatchFetcher interface { - FetchBatch(batchNum uint64) ([]byte, error) + FetchBatch(batchNum uint64) ([]byte, common.Hash, error) } type TransactionStreamer interface { diff --git a/staker/block_validator.go b/staker/block_validator.go index 352335a5db..fad5e9c39a 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -45,11 +45,12 @@ type BlockValidator struct { chainCaughtUp bool // can only be accessed from creation thread or if holding reorg-write - nextCreateBatch []byte - nextCreateBatchMsgCount arbutil.MessageIndex - nextCreateBatchReread bool - nextCreateStartGS validator.GoGlobalState - nextCreatePrevDelayed uint64 + nextCreateBatch []byte + nextCreateBatchBlockHash common.Hash + nextCreateBatchMsgCount arbutil.MessageIndex + nextCreateBatchReread bool + nextCreateStartGS validator.GoGlobalState + nextCreatePrevDelayed uint64 // can only be accessed from from validation thread or if holding reorg-write lastValidGS validator.GoGlobalState @@ -455,23 +456,23 @@ func (v *BlockValidator) SetCurrentWasmModuleRoot(hash common.Hash) error { ) } -func (v *BlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, arbutil.MessageIndex, error) { +func (v *BlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, common.Hash, arbutil.MessageIndex, error) { batchCount, err := v.inboxTracker.GetBatchCount() if err != nil { - return false, nil, 0, err + return false, nil, common.Hash{}, 0, err } if batchCount <= batchNum { - return false, nil, 0, nil + return false, nil, common.Hash{}, 0, nil } batchMsgCount, err := v.inboxTracker.GetBatchMessageCount(batchNum) if err != nil { - return false, nil, 0, err + return false, nil, common.Hash{}, 0, err } - batch, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum) + batch, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum) if err != nil { - return false, nil, 0, err + return false, nil, common.Hash{}, 0, err } - return true, batch, batchMsgCount, nil + return true, batch, batchBlockHash, batchMsgCount, nil } func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, error) { @@ -500,11 +501,12 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e } if v.nextCreateStartGS.PosInBatch == 0 || v.nextCreateBatchReread { // new batch - found, batch, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch) + found, batch, batchBlockHash, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch) if !found { return false, err } v.nextCreateBatch = batch + v.nextCreateBatchBlockHash = batchBlockHash v.nextCreateBatchMsgCount = count validatorMsgCountCurrentBatch.Update(int64(count)) v.nextCreateBatchReread = false @@ -522,7 +524,7 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e } else { return false, fmt.Errorf("illegal batch msg count %d pos %d batch %d", v.nextCreateBatchMsgCount, pos, endGS.Batch) } - entry, err := newValidationEntry(pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreatePrevDelayed) + entry, err := newValidationEntry(pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreateBatchBlockHash, v.nextCreatePrevDelayed) if err != nil { return false, err } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index d35304be27..13b16e42cd 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go 
@@ -39,6 +39,7 @@ type StatelessBlockValidator struct { streamer TransactionStreamerInterface db ethdb.Database daService arbstate.DataAvailabilityReader + blobReader arbstate.BlobReader moduleMutex sync.Mutex currentWasmModuleRoot common.Hash @@ -67,7 +68,7 @@ type TransactionStreamerInterface interface { } type InboxReaderInterface interface { - GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, error) + GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) } type GlobalStatePosition struct { @@ -186,11 +187,13 @@ func newValidationEntry( end validator.GoGlobalState, msg *arbostypes.MessageWithMetadata, batch []byte, + batchBlockHash common.Hash, prevDelayed uint64, ) (*validationEntry, error) { batchInfo := validator.BatchInfo{ - Number: start.Batch, - Data: batch, + Number: start.Batch, + BlockHash: batchBlockHash, + Data: batch, } hasDelayed := false var delayedNum uint64 @@ -219,6 +222,7 @@ func NewStatelessBlockValidator( recorder execution.ExecutionRecorder, arbdb ethdb.Database, das arbstate.DataAvailabilityReader, + blobReader arbstate.BlobReader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -235,6 +239,7 @@ func NewStatelessBlockValidator( streamer: streamer, db: arbdb, daService: das, + blobReader: blobReader, } return validator, nil } @@ -285,20 +290,35 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * continue } if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { - // TODO: fetch blob preimages - panic("TODO: fetch blob preimages") - } - if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { - continue - } - if v.daService == nil { - log.Warn("No DAS configured, but sequencer message found with DAS header") - } else { - _, err := arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, - ) + payload := batch.Data[41:] + if len(payload)%len(common.Hash{}) != 0 { + return fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(payload)/len(common.Hash{})) + for i := 0; i*32 < len(payload); i += 1 { + copy(versionedHashes[i][:], payload[i*32:(i+1)*32]) + } + blobs, err := v.blobReader.GetBlobs(ctx, batch.BlockHash, versionedHashes) if err != nil { - return err + return fmt.Errorf("failed to get blobs: %w", err) + } + if e.Preimages[arbutil.EthVersionedHashPreimageType] == nil { + e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) + } + for i, blob := range blobs { + e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] + } + } + if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { + if v.daService == nil { + log.Warn("No DAS configured, but sequencer message found with DAS header") + } else { + _, err := arbstate.RecoverPayloadFromDasBatch( + ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, + ) + if err != nil { + return err + } } } } @@ -363,11 +383,11 @@ func (v *StatelessBlockValidator) CreateReadyValidationEntry(ctx context.Context } start := buildGlobalState(*prevResult, startPos) end := buildGlobalState(*result, endPos) - seqMsg, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber) + seqMsg, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber) if err != nil { return nil, err } - entry, err := newValidationEntry(pos, start, end, msg, seqMsg, prevDelayed) + entry, err := 
newValidationEntry(pos, start, end, msg, seqMsg, batchBlockHash, prevDelayed) if err != nil { return nil, err } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 0fa483b6ea..b8f891e3e7 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -385,7 +385,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -402,7 +402,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 6e3ffd6125..62e89ff782 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -208,6 +208,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) execNodeA, l2nodeA.ArbDB, nil, + nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) @@ -260,6 +261,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) execNodeB, l2nodeB.ArbDB, nil, + nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) diff --git a/validator/validation_entry.go b/validator/validation_entry.go index fed1940f1f..8bb021335e 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -6,8 +6,9 @@ import ( ) type BatchInfo struct { - Number uint64 - Data []byte + Number uint64 + BlockHash common.Hash + Data []byte } type ValidationInput struct { From 59a8935a9bb651ce43370e441db2cae702a2f004 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:31:50 -0700 Subject: [PATCH 043/103] Fix arbitrator CI --- .github/workflows/arbitrator-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index ba3cf016f5..54a948e04a 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -161,4 +161,5 @@ jobs: cd contracts yarn install yarn build + yarn build:forge:yul yarn hardhat --network localhost test test/prover/*.ts From f646430e65d44936e984ec6e4d8cb47044801ba0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:43:42 -0700 Subject: [PATCH 044/103] Fix Go test --- system_tests/meaningless_reorg_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index f09f68041a..e1715dc635 100644 
--- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -95,7 +95,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { time.Sleep(10 * time.Millisecond) } - _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1) + _, _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1) Require(t, err) l2Header, err := builder.L2.Client.HeaderByNumber(ctx, l2Receipt.BlockNumber) From d2b7c9db14667046d08557d1503cf38dad267117 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 21:26:43 -0700 Subject: [PATCH 045/103] Strengthen Uint64String parsing --- util/jsonapi/uint64_string.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/util/jsonapi/uint64_string.go b/util/jsonapi/uint64_string.go index 27cbb18526..980b97a949 100644 --- a/util/jsonapi/uint64_string.go +++ b/util/jsonapi/uint64_string.go @@ -4,6 +4,7 @@ package jsonapi import ( + "encoding/json" "fmt" "strconv" ) @@ -12,13 +13,18 @@ import ( type Uint64String uint64 func (u *Uint64String) UnmarshalJSON(b []byte) error { - s := string(b) - if s == "null" { + jsonString := string(b) + if jsonString == "null" { return nil } - // Parse string as uint64, removing quotes - value, err := strconv.ParseUint(s[1:len(s)-1], 10, 64) + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + value, err := strconv.ParseUint(s, 10, 64) if err != nil { return err } From 277f241004def543f547bdb65aa4688216994bbc Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 23:40:07 -0700 Subject: [PATCH 046/103] Pack more bits into blobs --- util/blobs/blobs.go | 74 ++++++++++++++++++++++++++++++++-------- util/blobs/blobs_test.go | 52 ++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 util/blobs/blobs_test.go diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 55df57f9d1..2852f2b29f 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -14,6 +14,44 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +func fillBlobBytes(blob []byte, data []byte) []byte { + for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ { + startIdx := fieldElement*32 + 1 + copy(blob[startIdx:startIdx+31], data) + if len(data) <= 31 { + return nil + } + data = data[31:] + } + return data +} + +// The number of bits in a BLS scalar that aren't part of a whole byte. +const spareBlobBits = 6 // = math.floor(math.log2(BLS_MODULUS)) % 8 + +func fillBlobBits(blob []byte, data []byte) ([]byte, error) { + var acc uint16 + accBits := 0 + for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ { + if accBits < spareBlobBits && len(data) > 0 { + acc |= uint16(data[0]) << accBits + accBits += 8 + data = data[1:] + } + blob[fieldElement*32] = uint8(acc & ((1 << spareBlobBits) - 1)) + accBits -= spareBlobBits + if accBits < 0 { + // We're out of data + break + } + acc >>= spareBlobBits + } + if accBits > 0 { + return nil, fmt.Errorf("somehow ended up with %v spare accBits", accBits) + } + return data, nil +} + // EncodeBlobs takes in raw bytes data to convert into blobs used for KZG commitment EIP-4844 // transactions on Ethereum. 
func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) { @@ -21,21 +59,15 @@ func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) { if err != nil { return nil, err } - blobs := []kzg4844.Blob{{}} - blobIndex := 0 - fieldIndex := -1 - for i := 0; i < len(data); i += 31 { - fieldIndex++ - if fieldIndex == params.BlobTxFieldElementsPerBlob { - blobs = append(blobs, kzg4844.Blob{}) - blobIndex++ - fieldIndex = 0 - } - max := i + 31 - if max > len(data) { - max = len(data) + var blobs []kzg4844.Blob + for len(data) > 0 { + var b kzg4844.Blob + data = fillBlobBytes(b[:], data) + data, err = fillBlobBits(b[:], data) + if err != nil { + return nil, err } - copy(blobs[blobIndex][fieldIndex*32+1:], data[i:max]) + blobs = append(blobs, b) } return blobs, nil } @@ -47,6 +79,20 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ { rlpData = append(rlpData, blob[fieldIndex*32+1:(fieldIndex+1)*32]...) } + var acc uint16 + accBits := 0 + for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ { + acc |= uint16(blob[fieldIndex*32]) << accBits + accBits += spareBlobBits + if accBits >= 8 { + rlpData = append(rlpData, uint8(acc)) + acc >>= 8 + accBits -= 8 + } + } + if accBits != 0 { + return nil, fmt.Errorf("somehow ended up with %v spare accBits", accBits) + } } var outputData []byte err := rlp.Decode(bytes.NewReader(rlpData), &outputData) diff --git a/util/blobs/blobs_test.go b/util/blobs/blobs_test.go new file mode 100644 index 0000000000..753b50a489 --- /dev/null +++ b/util/blobs/blobs_test.go @@ -0,0 +1,52 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package blobs + +import ( + "bytes" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/params" +) + +const bytesEncodedPerBlob = 254 * 4096 / 8 + +var blsModulus, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10) + +func TestBlobEncoding(t *testing.T) { + r := rand.New(rand.NewSource(1)) +outer: + for i := 0; i < 40; i++ { + data := make([]byte, r.Int()%bytesEncodedPerBlob*3) + _, err := r.Read(data) + if err != nil { + t.Fatalf("failed to generate random bytes: %v", err) + } + enc, err := EncodeBlobs(data) + if err != nil { + t.Errorf("failed to encode blobs for length %v: %v", len(data), err) + continue + } + for _, b := range enc { + for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ { + bigInt := new(big.Int).SetBytes(b[fieldElement*32 : (fieldElement+1)*32]) + if bigInt.Cmp(blsModulus) >= 0 { + t.Errorf("for length %v blob %v has field element %v value %v >= modulus %v", len(data), b, fieldElement, bigInt, blsModulus) + continue outer + } + } + } + dec, err := DecodeBlobs(enc) + if err != nil { + t.Errorf("failed to decode blobs for length %v: %v", len(data), err) + continue + } + if !bytes.Equal(data, dec) { + t.Errorf("got different decoding for length %v", len(data)) + continue + } + } +} From 03b2fe8a801d5429b759603a1fd12d9f8d4186f6 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 24 Jan 2024 08:13:20 -0700 Subject: [PATCH 047/103] Add separate max batch size for 4844 --- arbnode/batch_poster.go | 66 ++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 4a07d36521..65ef76e0a0 100644 --- a/arbnode/batch_poster.go +++ 
b/arbnode/batch_poster.go @@ -118,6 +118,8 @@ type BatchPosterConfig struct { DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` // Max batch size. MaxSize int `koanf:"max-size" reload:"hot"` + // Maximum 4844 blob enabled batch size. + Max4844BatchSize int `koanf:"max-4844-batch-size" reload:"hot"` // Max batch post delay. MaxDelay time.Duration `koanf:"max-delay" reload:"hot"` // Wait for max BatchPost delay. @@ -174,6 +176,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size") f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxDelay, "wait for the max batch delay, even if the batch is full") f.Duration(prefix+".poll-interval", DefaultBatchPosterConfig.PollInterval, "how long to wait after no batches are ready to be posted before checking again") @@ -197,7 +200,9 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go - MaxSize: 100000, + MaxSize: 100000, + // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? + Max4844BatchSize: (254 * params.BlobTxFieldElementsPerBlob / 8 * (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob)) - 1000, PollInterval: time.Second * 10, ErrorDelay: time.Second * 10, MaxDelay: time.Hour, @@ -227,6 +232,7 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ var TestBatchPosterConfig = BatchPosterConfig{ Enable: true, MaxSize: 100000, + Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize, PollInterval: time.Millisecond * 10, ErrorDelay: time.Millisecond * 10, MaxDelay: 0, @@ -552,13 +558,20 @@ type buildingBatch struct { startMsgCount arbutil.MessageIndex msgCount arbutil.MessageIndex haveUsefulMessage bool + use4844 bool } -func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { - compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxSize*2)) - if config.MaxSize <= 40 { - panic("MaxBatchSize too small") +func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64, use4844 bool) *batchSegments { + maxSize := config.MaxSize + if use4844 { + maxSize = config.Max4844BatchSize + } else { + if maxSize <= 40 { + panic("Maximum batch size too small") + } + maxSize -= 40 } + compressedBuffer := bytes.NewBuffer(make([]byte, 0, maxSize*2)) compressionLevel := config.CompressionLevel recompressionLevel := config.CompressionLevel if backlog > 20 { @@ -582,7 +595,7 @@ func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog ui return &batchSegments{ compressedBuffer: compressedBuffer, compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: config.MaxSize - 40, // TODO + sizeLimit: maxSize, recompressionLevel: recompressionLevel, rawSegments: make([][]byte, 0, 128), delayedMsg: 
firstDelayed, @@ -936,10 +949,29 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if b.building == nil || b.building.startMsgCount != batchPosition.MessageCount { + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + var use4844 bool + config := b.config() + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.ForcePost4844Blobs { + use4844 = true + } else { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + } b.building = &buildingBatch{ - segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate()), + segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate(), use4844), msgCount: batchPosition.MessageCount, startMsgCount: batchPosition.MessageCount, + use4844: use4844, } } msgCount, err := b.streamer.GetMessageCount() @@ -1115,26 +1147,12 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } - latestHeader, err := b.l1Reader.LastHeader(ctx) + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) if err != nil { return false, err } - var use4844 bool - if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - if config.ForcePost4844Blobs { - use4844 = true - } else { - blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) - } - } - data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, use4844) - if err != nil { - return false, err + if len(kzgBlobs)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { + return false, fmt.Errorf("produced %v blobs for batch but a block can only hold %v", len(kzgBlobs), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) } accessList := b.accessList(int(batchPosition.NextSeqNum), int(b.building.segments.delayedMsg)) // On restart, we may be trying to estimate gas for a batch whose successor has From ec2541b8e79d35c280301136717af707e844ae95 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 15:33:24 +0100 Subject: [PATCH 048/103] Fix nitro-fuzzer entrypoint argument --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 9f8a5a75df..8a4b40e1fd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -198,7 +198,7 @@ FROM debian:bookworm-slim as nitro-fuzzer COPY --from=fuzz-builder /workspace/fuzzers/*.fuzz /usr/local/bin/ COPY ./scripts/fuzz.bash /usr/local/bin RUN mkdir /fuzzcache -ENTRYPOINT [ "/usr/local/bin/fuzz.bash", "--binary-path", "/usr/local/bin/", "--fuzzcache-path", 
"/fuzzcache" ] +ENTRYPOINT [ "/usr/local/bin/fuzz.bash", "FuzzStateTransition", "--binary-path", "/usr/local/bin/", "--fuzzcache-path", "/fuzzcache" ] FROM debian:bookworm-slim as nitro-node-slim WORKDIR /home/user From 4445f9337909e5da6ab5118dd858feac244efe3d Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 15:44:07 +0100 Subject: [PATCH 049/103] Fix script to complain when neither build nor fuzzer name is specified --- scripts/fuzz.bash | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/fuzz.bash b/scripts/fuzz.bash index d236f90ce8..f1f84d9ba1 100755 --- a/scripts/fuzz.bash +++ b/scripts/fuzz.bash @@ -83,6 +83,11 @@ while [[ $# -gt 0 ]]; do esac done +if [[ "$run_build" == "false" && -z "$test_group" ]]; then + echo you must specify either --build flag or fuzzer-name + printusage +fi + if $run_build; then for build_group in system_tests arbstate; do go test -c ${nitropath}/${build_group} -fuzz Fuzz -o "$binpath"/${build_group}.fuzz From 2e5832e33b91fa851c2267ad76cddec59d335f36 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 16:26:54 +0100 Subject: [PATCH 050/103] Add flag to fuzz script for timeout --- scripts/fuzz.bash | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/fuzz.bash b/scripts/fuzz.bash index f1f84d9ba1..91373d81f4 100755 --- a/scripts/fuzz.bash +++ b/scripts/fuzz.bash @@ -7,12 +7,14 @@ cd "$mydir" function printusage { echo Usage: $0 --build \[--binary-path PATH\] - echo " " $0 \ \[--binary-path PATH\] \[--fuzzcache-path PATH\] \[--nitro-path PATH\] + echo " " $0 \ \[--binary-path PATH\] \[--fuzzcache-path PATH\] \[--nitro-path PATH\] \[--duration DURATION\] echo echo fuzzer names: echo " " FuzzPrecompiles echo " " FuzzInboxMultiplexer echo " " FuzzStateTransition + echo + echo " " duration in minutes } if [[ $# -eq 0 ]]; then @@ -26,6 +28,7 @@ fuzzcachepath=../target/var/fuzz-cache nitropath=../ run_build=false test_group="" +duration=60 while [[ $# -gt 0 ]]; do case $1 in --nitro-path) @@ -55,6 +58,15 @@ while [[ $# -gt 0 ]]; do shift shift ;; + --duration) + duration="$2" + if ! [[ "$duration" =~ ^[0-9]+$ ]]; then + echo "Invalid timeout duration. Please specify positive integer (in minutes)" + exit 1 + fi + shift + shift + ;; --build) run_build=true shift @@ -95,5 +107,5 @@ if $run_build; then fi if [[ ! 
-z $test_group ]]; then - "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name + timeout "$((60 * duration))" "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name fi From 601681a1e758ee3bb0b09989fc76e27adb8b1739 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 16:31:53 +0100 Subject: [PATCH 051/103] Add workflow for release ci that is triggered with PR tag, run nitro fuzzer in it --- .github/workflows/release-ci.yml | 37 ++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 .github/workflows/release-ci.yml diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml new file mode 100644 index 0000000000..729b2696ff --- /dev/null +++ b/.github/workflows/release-ci.yml @@ -0,0 +1,37 @@ +name: Release CI +run-name: Release CI triggered from @${{ github.actor }} of ${{ github.head_ref }} + +on: + pull_request: + types: + - master + - develop + +jobs: + build_and_run: + runs-on: ubuntu-8 + + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Check PR Title For Release Candidate Tag ([Release]) + run: | + if [[ ${{ github.event.pull_request.title }} == "[Release]" ]]; then + echo "Release candidate." + else + echo "Not a release candidate. Skipping workflow." + exit 0 + fi + + - name: Build nitro-fuzzer Docker Image + run: docker build --target nitro-fuzzer -t nitro-fuzzer-image . + + - name: Run Docker Container + run: docker run --name nitro-fuzzer-container nitro-fuzzer-image & + + - name: Stop Docker Container + run: docker stop nitro-fuzzer-container From 86cf5ca837284a0f2671799be9c68aa457f070a3 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 16:41:03 +0100 Subject: [PATCH 052/103] Trigger workflow from label instead of PR title --- .github/workflows/release-ci.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 729b2696ff..8dff82ec9d 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -18,9 +18,9 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 - - name: Check PR Title For Release Candidate Tag ([Release]) + - name: Check PR Label For Release label run: | - if [[ ${{ github.event.pull_request.title }} == "[Release]" ]]; then + if contains(github.event.pull_request.labels.*.name, 'release') echo "Release candidate." else echo "Not a release candidate. Skipping workflow." @@ -28,10 +28,7 @@ jobs: fi - name: Build nitro-fuzzer Docker Image - run: docker build --target nitro-fuzzer -t nitro-fuzzer-image . + run: docker build --target nitro-fuzzer -t nitro-fuzzer . 
- name: Run Docker Container - run: docker run --name nitro-fuzzer-container nitro-fuzzer-image & - - - name: Stop Docker Container - run: docker stop nitro-fuzzer-container + run: docker run nitro-fuzzer From 32ea163d1549ad40300370f09efa61b979ad8b45 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 16:44:53 +0100 Subject: [PATCH 053/103] Update workflow triggering conditions --- .github/workflows/release-ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 8dff82ec9d..14121e6ece 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -2,11 +2,13 @@ name: Release CI run-name: Release CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: + workflow_dispatch: + merge_group: pull_request: - types: + push: + branches: - master - develop - jobs: build_and_run: runs-on: ubuntu-8 From 991f055de6a9867c1b1d4514d1d9273114d24c50 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 17:01:58 +0100 Subject: [PATCH 054/103] Fix workflow syntax --- .github/workflows/release-ci.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 14121e6ece..9fbe22d87d 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -21,13 +21,10 @@ jobs: uses: docker/setup-buildx-action@v1 - name: Check PR Label For Release label + if: !contains(github.event.pull_request.labels.*.name, 'release') run: | - if contains(github.event.pull_request.labels.*.name, 'release') - echo "Release candidate." - else - echo "Not a release candidate. Skipping workflow." - exit 0 - fi + echo "Not a release candidate. Skipping workflow." + exit 0 - name: Build nitro-fuzzer Docker Image run: docker build --target nitro-fuzzer -t nitro-fuzzer . From e71ca4b5148db599b304bb9849067ae39f230873 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 17:08:32 +0100 Subject: [PATCH 055/103] Yet another attempt to fix workflow syntax --- .github/workflows/release-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 9fbe22d87d..b5dae57458 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -21,7 +21,7 @@ jobs: uses: docker/setup-buildx-action@v1 - name: Check PR Label For Release label - if: !contains(github.event.pull_request.labels.*.name, 'release') + if: ${{ !contains(github.event.*.labels.*.name, 'release') }} run: | echo "Not a release candidate. Skipping workflow." 
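# Note: a plain `exit 0` only ends this step successfully; it does not skip the
# remaining steps of the job, so steps meant to run only for release candidates
# still need their own `if:` guard.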
exit 0 From ee8cc8d2350c05ba5b429748aec8a94e06cf58fa Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 25 Jan 2024 17:17:45 +0100 Subject: [PATCH 056/103] Checout submodules recursively in the workflow, cache docker layers --- .github/workflows/release-ci.yml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index b5dae57458..4981513a13 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -14,11 +14,23 @@ jobs: runs-on: ubuntu-8 steps: - - name: Checkout Repository - uses: actions/checkout@v2 + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 + with: + driver-opts: network=host + + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ hashFiles('Dockerfile') }} + restore-keys: ${{ runner.os }}-buildx- + - name: Check PR Label For Release label if: ${{ !contains(github.event.*.labels.*.name, 'release') }} From 47053b95dfb3e9b516a177f70c91c5ccc176cef1 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 25 Jan 2024 13:08:33 -0700 Subject: [PATCH 057/103] In delayed sequencer, check accumulator against safe block hash --- arbnode/delayed.go | 41 +++++++++++++++++++++++++++++++----- arbnode/delayed_sequencer.go | 10 ++++++--- arbnode/inbox_reader.go | 3 ++- arbutil/wait_for_l1.go | 1 + 4 files changed, 46 insertions(+), 9 deletions(-) diff --git a/arbnode/delayed.go b/arbnode/delayed.go index 498aa0475f..2a1745c540 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "errors" + "fmt" "math/big" "sort" @@ -14,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -28,6 +30,7 @@ var messageDeliveredID common.Hash var inboxMessageDeliveredID common.Hash var inboxMessageFromOriginID common.Hash var l2MessageFromOriginCallABI abi.Method +var delayedInboxAccsCallABI abi.Method func init() { parsedIBridgeABI, err := bridgegen.IBridgeMetaData.GetAbi() @@ -35,6 +38,7 @@ func init() { panic(err) } messageDeliveredID = parsedIBridgeABI.Events["MessageDelivered"].ID + delayedInboxAccsCallABI = parsedIBridgeABI.Methods["delayedInboxAccs"] parsedIMessageProviderABI, err := bridgegen.IDelayedMessageProviderMetaData.GetAbi() if err != nil { @@ -95,12 +99,39 @@ func (b *DelayedBridge) GetMessageCount(ctx context.Context, blockNumber *big.In return bigRes.Uint64(), nil } -func (b *DelayedBridge) GetAccumulator(ctx context.Context, sequenceNumber uint64, blockNumber *big.Int) (common.Hash, error) { - opts := &bind.CallOpts{ - Context: ctx, - BlockNumber: blockNumber, +// Uses blockHash if nonzero, otherwise uses blockNumber +func (b *DelayedBridge) GetAccumulator(ctx context.Context, sequenceNumber uint64, blockNumber *big.Int, blockHash common.Hash) (common.Hash, error) { + calldata := append([]byte{}, delayedInboxAccsCallABI.ID...) + inputs, err := delayedInboxAccsCallABI.Inputs.Pack(arbmath.UintToBig(sequenceNumber)) + if err != nil { + return common.Hash{}, err + } + calldata = append(calldata, inputs...) 
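+ // calldata is the 4-byte selector of delayedInboxAccs followed by the ABI-encoded
+ // uint256 sequence number; building it by hand instead of using the generated binding
+ // lets the call below be pinned to an exact parent-chain block hash when one is given.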
+ msg := ethereum.CallMsg{ + To: &b.address, + Data: calldata, + } + var result hexutil.Bytes + if blockHash != (common.Hash{}) { + result, err = b.client.CallContractAtHash(ctx, msg, blockHash) + } else { + result, err = b.client.CallContract(ctx, msg, blockNumber) + } + if err != nil { + return common.Hash{}, err + } + values, err := delayedInboxAccsCallABI.Outputs.Unpack(result) + if err != nil { + return common.Hash{}, err + } + if len(values) != 1 { + return common.Hash{}, fmt.Errorf("expected 1 return value from %v, got %v", delayedInboxAccsCallABI.Name, len(values)) + } + hash, ok := values[0].([32]byte) + if !ok { + return common.Hash{}, fmt.Errorf("expected [32]uint8 return value from %v, got %T", delayedInboxAccsCallABI.Name, values[0]) } - return b.con.DelayedInboxAccs(opts, new(big.Int).SetUint64(sequenceNumber)) + return hash, nil } type DelayedInboxMessage struct { diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index f1b912e0f7..8cbb094c16 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -100,16 +100,20 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock } var finalized uint64 + var finalizedHash common.Hash if config.UseMergeFinality && headerreader.HeaderIndicatesFinalitySupport(lastBlockHeader) { + var header *types.Header var err error if config.RequireFullFinality { - finalized, err = d.l1Reader.LatestFinalizedBlockNr(ctx) + header, err = d.l1Reader.LatestFinalizedBlockHeader(ctx) } else { - finalized, err = d.l1Reader.LatestSafeBlockNr(ctx) + header, err = d.l1Reader.LatestSafeBlockHeader(ctx) } if err != nil { return err } + finalized = header.Number.Uint64() + finalizedHash = header.Hash() } else { currentNum := lastBlockHeader.Number.Int64() if currentNum < config.FinalizeDistance { @@ -167,7 +171,7 @@ func (d *DelayedSequencer) sequenceWithoutLockout(ctx context.Context, lastBlock // Sequence the delayed messages, if any if len(messages) > 0 { - delayedBridgeAcc, err := d.bridge.GetAccumulator(ctx, pos-1, new(big.Int).SetUint64(finalized)) + delayedBridgeAcc, err := d.bridge.GetAccumulator(ctx, pos-1, new(big.Int).SetUint64(finalized), finalizedHash) if err != nil { return err } diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 9c830e3c89..7a2ffa505c 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -14,6 +14,7 @@ import ( "sync/atomic" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" @@ -299,7 +300,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if checkingDelayedCount > 0 { checkingDelayedSeqNum := checkingDelayedCount - 1 - l1DelayedAcc, err := r.delayedBridge.GetAccumulator(ctx, checkingDelayedSeqNum, currentHeight) + l1DelayedAcc, err := r.delayedBridge.GetAccumulator(ctx, checkingDelayedSeqNum, currentHeight, common.Hash{}) if err != nil { return err } diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index b66710dbf0..a80502610b 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -23,6 +23,7 @@ type L1Interface interface { ethereum.TransactionReader TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) BlockNumber(ctx context.Context) (uint64, error) + CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) ChainID(ctx 
context.Context) (*big.Int, error) } From 14fb850901b57393809853b7d532537cf65c78d2 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Fri, 26 Jan 2024 12:04:44 +0100 Subject: [PATCH 058/103] Ignore timeout error, and error out only if the fuzzing binary errors out --- scripts/fuzz.bash | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/scripts/fuzz.bash b/scripts/fuzz.bash index 91373d81f4..6271b917b6 100755 --- a/scripts/fuzz.bash +++ b/scripts/fuzz.bash @@ -107,5 +107,12 @@ if $run_build; then fi if [[ ! -z $test_group ]]; then - timeout "$((60 * duration))" "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name + timeout "$((60 * duration))" "$binpath"/${test_group}.fuzz -test.run "^$" -test.fuzzcachedir "$fuzzcachepath" -test.fuzz $test_name || exit_status=$? fi + +if [ -n "$exit_status" ] && [ $exit_status -ne 0 ] && [ $exit_status -ne 124 ]; then + echo "Fuzzing failed." + exit $exit_status +fi + +echo "Fuzzing succeeded." From 1ba8e075fa8b4ca0a096c1a44e72c8ee6b9fd229 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sat, 27 Jan 2024 23:06:59 -0700 Subject: [PATCH 059/103] Move contracts back to previous pin --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index a8e7709bfc..cd5093d45e 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit a8e7709bfc918f9b8e2888d47f2fd8454779fd11 +Subproject commit cd5093d45ef0353fc5b2718ead70bd7f36e1a92c From f3b06106937970e573690ccf2c4feed6b04cf774 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 11:12:37 -0700 Subject: [PATCH 060/103] Support and test ArbOS 20 --- arbos/arbosState/arbosstate.go | 34 ++++++++++++++++------------ execution/gethexec/tx_pre_checker.go | 5 ++++ go-ethereum | 2 +- system_tests/block_validator_test.go | 1 + system_tests/estimation_test.go | 17 ++++++++++++++ 5 files changed, 43 insertions(+), 16 deletions(-) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index caf44e2a99..7fdb61aba2 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -277,14 +277,13 @@ func (state *ArbosState) UpgradeArbosVersion( } } - switch state.arbosVersion { - case 1: - ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) + nextArbosVersion := state.arbosVersion + 1 + switch nextArbosVersion { case 2: + ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) + case 3: ensure(state.l1PricingState.SetPerBatchGasCost(0)) ensure(state.l1PricingState.SetAmortizedCostCapBips(math.MaxUint64)) - case 3: - // no state changes needed case 4: // no state changes needed case 5: @@ -296,10 +295,12 @@ func (state *ArbosState) UpgradeArbosVersion( case 8: // no state changes needed case 9: + // no state changes needed + case 10: ensure(state.l1PricingState.SetL1FeesAvailable(stateDB.GetBalance( l1pricing.L1PricerFundsPoolAddress, ))) - case 10: + case 11: // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) @@ -316,26 +317,29 @@ func (state *ArbosState) UpgradeArbosVersion( ensure(state.chainOwners.ClearList()) } // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. - // TODO: currently you can't get to ArbOS 20 without hitting the default case. 
- case 19: + case 20: if !chainConfig.DebugMode() { // This upgrade isn't finalized so we only want to support it for testing return fmt.Errorf( "the chain is upgrading to unsupported ArbOS version %v, %w", - state.arbosVersion+1, + nextArbosVersion, ErrFatalNodeOutOfDate, ) } // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) default: - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - state.arbosVersion+1, - ErrFatalNodeOutOfDate, - ) + if nextArbosVersion >= 12 && state.arbosVersion < 20 { + // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. + } else { + return fmt.Errorf( + "the chain is upgrading to unsupported ArbOS version %v, %w", + nextArbosVersion, + ErrFatalNodeOutOfDate, + ) + } } - state.arbosVersion++ + state.arbosVersion = nextArbosVersion } if firstTime && upgradeTo >= 6 { diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go index 51ba88fec8..cff8b04d32 100644 --- a/execution/gethexec/tx_pre_checker.go +++ b/execution/gethexec/tx_pre_checker.go @@ -116,6 +116,11 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if tx.Gas() < params.TxGas { return core.ErrIntrinsicGas } + if tx.Type() >= types.ArbitrumDepositTxType || tx.Type() == types.BlobTxType { + // Should be unreachable for Arbitrum types due to UnmarshalBinary not accepting Arbitrum internal txs + // and we want to disallow BlobTxType since Arbitrum doesn't support EIP-4844 txs yet. + return types.ErrTxTypeNotSupported + } sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number, header.Time), tx) if err != nil { return err diff --git a/go-ethereum b/go-ethereum index 214d1c1c9b..1acd9c64ac 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 214d1c1c9b6ef7830dfb22c3bba7563b89d18960 +Subproject commit 1acd9c64ac5804729475ef60aa578b4ec52fa0e6 diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index e2e4227bf6..1fcf2bab34 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -93,6 +93,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops contractCode = append(contractCode, byte(vm.PUSH0)) contractCode = append(contractCode, byte(vm.CODECOPY)) contractCode = append(contractCode, byte(vm.PUSH0)) + contractCode = append(contractCode, byte(vm.BLOBHASH)) contractCode = append(contractCode, byte(vm.RETURN)) basefee := builder.L2.GetBaseFee(t) var err error diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index eda7fb449f..6f47c14f1f 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/solgen/go/mocksgen" @@ -180,6 +181,22 @@ func TestDifficultyForArbOSTen(t *testing.T) { } } +func TestBlobBasefeeReverts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + _, err := builder.L2.Client.CallContract(ctx, ethereum.CallMsg{ + Data: []byte{byte(vm.BLOBBASEFEE)}, + }, nil) 
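+ // The message has no destination and its data is the single BLOBBASEFEE opcode, so the
+ // node attempts to execute that opcode; the call is expected to error because EIP-4844
+ // blob support is not enabled on Arbitrum.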
+ if err == nil { + t.Error("Expected BLOBBASEFEE to revert") + } +} + func TestComponentEstimate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() From fe0d6231fc6196cbb1f048fe1e7c0f73ae2095d2 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 11:38:39 -0700 Subject: [PATCH 061/103] Add missing L1 pricing getters to ArbGasInfo in ArbOS 20 --- arbos/l1pricing/l1pricing.go | 8 -------- contracts | 2 +- precompiles/ArbGasInfo.go | 24 ++++++++++++++++++++++-- precompiles/precompile.go | 5 +++++ 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 27ecae8b85..f2312c46d4 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -146,10 +146,6 @@ func (ps *L1PricingState) SetPayRewardsTo(addr common.Address) error { return ps.payRewardsTo.Set(addr) } -func (ps *L1PricingState) GetRewardsRecepient() (common.Address, error) { - return ps.payRewardsTo.Get() -} - func (ps *L1PricingState) EquilibrationUnits() (*big.Int, error) { return ps.equilibrationUnits.Get() } @@ -174,10 +170,6 @@ func (ps *L1PricingState) SetPerUnitReward(weiPerUnit uint64) error { return ps.perUnitReward.Set(weiPerUnit) } -func (ps *L1PricingState) GetRewardsRate() (uint64, error) { - return ps.perUnitReward.Get() -} - func (ps *L1PricingState) LastUpdateTime() (uint64, error) { return ps.lastUpdateTime.Get() } diff --git a/contracts b/contracts index cd5093d45e..7b84be56ec 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit cd5093d45ef0353fc5b2718ead70bd7f36e1a92c +Subproject commit 7b84be56ec5352aaeb85c58fea7f50725e1927d9 diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index 378d48c780..cda5350a4a 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -164,12 +164,12 @@ func (con ArbGasInfo) GetL1BaseFeeEstimateInertia(c ctx, evm mech) (uint64, erro // GetL1RewardRate gets the L1 pricer reward rate func (con ArbGasInfo) GetL1RewardRate(c ctx, evm mech) (uint64, error) { - return c.State.L1PricingState().GetRewardsRate() + return c.State.L1PricingState().PerUnitReward() } // GetL1RewardRecipient gets the L1 pricer reward recipient func (con ArbGasInfo) GetL1RewardRecipient(c ctx, evm mech) (common.Address, error) { - return c.State.L1PricingState().GetRewardsRecepient() + return c.State.L1PricingState().PayRewardsTo() } // GetL1GasPriceEstimate gets the current estimate of the L1 basefee @@ -244,3 +244,23 @@ func (con ArbGasInfo) GetAmortizedCostCapBips(c ctx, evm mech) (uint64, error) { func (con ArbGasInfo) GetL1FeesAvailable(c ctx, evm mech) (huge, error) { return c.State.L1PricingState().L1FeesAvailable() } + +func (con ArbGasInfo) GetL1PricingEquilibrationUnits(c ctx, evm mech) (*big.Int, error) { + return c.State.L1PricingState().EquilibrationUnits() +} + +func (con ArbGasInfo) GetLastL1PricingUpdateTime(c ctx, evm mech) (uint64, error) { + return c.State.L1PricingState().LastUpdateTime() +} + +func (con ArbGasInfo) GetL1PricingFundsDueForRewards(c ctx, evm mech) (*big.Int, error) { + return c.State.L1PricingState().FundsDueForRewards() +} + +func (con ArbGasInfo) GetL1PricingUnitsSinceUpdate(c ctx, evm mech) (uint64, error) { + return c.State.L1PricingState().UnitsSinceUpdate() +} + +func (con ArbGasInfo) GetLastL1PricingSurplus(c ctx, evm mech) (*big.Int, error) { + return c.State.L1PricingState().LastSurplus() +} diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 330eb9a2e4..5d2ecce745 
100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -538,6 +538,11 @@ func Precompiles() map[addr]ArbosPrecompile { ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = 10 ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = 11 ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = 11 + ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = 20 insert(MakePrecompile(templates.ArbAggregatorMetaData, &ArbAggregator{Address: hex("6d")})) insert(MakePrecompile(templates.ArbStatisticsMetaData, &ArbStatistics{Address: hex("6f")})) From b7d26784af334b71be7567cc2e3682ed732898d8 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 11:41:45 -0700 Subject: [PATCH 062/103] Disable EIP-4844 batch posting by default for now --- arbnode/batch_poster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 65ef76e0a0..e3af0b2afb 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -211,7 +211,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 50_000, - Post4844Blobs: true, + Post4844Blobs: false, ForcePost4844Blobs: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, From a80fa288679b0f4145e477ffd54ef2af34ad0f99 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 16:53:47 -0700 Subject: [PATCH 063/103] Add ArbOS precompile method to get scheduled upgrade --- contracts | 2 +- precompiles/ArbOwnerPublic.go | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/contracts b/contracts index 7b84be56ec..6e61fdb475 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7b84be56ec5352aaeb85c58fea7f50725e1927d9 +Subproject commit 6e61fdb475bb4b38870e40827dd37627afc4bacd diff --git a/precompiles/ArbOwnerPublic.go b/precompiles/ArbOwnerPublic.go index 4064f41bef..451e18e1cc 100644 --- a/precompiles/ArbOwnerPublic.go +++ b/precompiles/ArbOwnerPublic.go @@ -52,3 +52,16 @@ func (con ArbOwnerPublic) GetInfraFeeAccount(c ctx, evm mech) (addr, error) { func (con ArbOwnerPublic) GetBrotliCompressionLevel(c ctx, evm mech) (uint64, error) { return c.State.BrotliCompressionLevel() } + +// GetScheduledUpgrade gets the next scheduled ArbOS version upgrade and its activation timestamp. +// Returns (0, 0, nil) if no ArbOS upgrade is scheduled. 
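+// A rough usage sketch with the generated bindings (the 0x6b address is the one used
+// in the system tests):
+//   pub, _ := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), client)
+//   upgrade, _ := pub.GetScheduledUpgrade(&bind.CallOpts{})
+//   // upgrade.ArbosVersion, upgrade.ScheduledForTimestamp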
+func (con ArbOwnerPublic) GetScheduledUpgrade(c ctx, evm mech) (uint64, uint64, error) { + version, timestamp, err := c.State.GetScheduledUpgrade() + if err != nil { + return 0, 0, err + } + if c.State.ArbOSVersion() >= version { + return 0, 0, nil + } + return version, timestamp, nil +} From 76924e06efc3a4292b36dcf489a9a86d7dbd44b9 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 16:58:19 -0700 Subject: [PATCH 064/103] Bump contracts pin --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 6e61fdb475..9a6bfad236 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 6e61fdb475bb4b38870e40827dd37627afc4bacd +Subproject commit 9a6bfad2363322099d399698751551ff044c7a72 From 4db1014c0f0e5854177b4dc7c07e5c8b6380d870 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 17:06:51 -0700 Subject: [PATCH 065/103] Add test of scheduling upgrade and getting it --- system_tests/precompile_test.go | 51 +++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index 10db09275b..7040a956cb 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -120,3 +120,54 @@ func TestPrecompileErrorGasLeft(t *testing.T) { Require(t, err) assertNotAllGasConsumed(common.HexToAddress("0xff"), arbDebug.Methods["legacyError"].ID) } + +func TestScheduleArbosUpgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + + arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) + Require(t, err, "could not bind ArbOwner contract") + + arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) + Require(t, err, "could not bind ArbOwner contract") + + callOpts := &bind.CallOpts{Context: ctx} + scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade") + if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { + t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } + + // Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would. 
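+ // The chain already runs a newer ArbOS version, so an upgrade to version 1 counts as
+ // already completed and GetScheduledUpgrade should keep reporting (0, 0).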
+ tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade") + if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { + t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } + + // TODO: Once we have an ArbOS 30, test a real upgrade with it + // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test + var testVersion uint64 = 100 + var testTimestamp uint64 = 1 >> 62 + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1>>62) + Require(t, err) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts) + Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade") + if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp { + t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) + } +} From 7bc141282c4d81a2b7d41723a1f39046e9da2e39 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 28 Jan 2024 17:10:04 -0700 Subject: [PATCH 066/103] Fix testTimestamp --- system_tests/precompile_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index 7040a956cb..e0a9c2ce78 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -159,8 +159,8 @@ func TestScheduleArbosUpgrade(t *testing.T) { // TODO: Once we have an ArbOS 30, test a real upgrade with it // We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test var testVersion uint64 = 100 - var testTimestamp uint64 = 1 >> 62 - tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1>>62) + var testTimestamp uint64 = 1 << 62 + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1<<62) Require(t, err) _, err = builder.L2.EnsureTxSucceeded(tx) Require(t, err) From a4cbf3b9fab7f22311d4274fc8992d49162f3567 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 29 Jan 2024 11:42:08 +0100 Subject: [PATCH 067/103] Implement linter for checking 1>>x expressions --- Makefile | 1 + linter/koanf/koanf.go | 4 -- linter/koanf/koanf_test.go | 2 +- linter/pointercheck/pointer.go | 4 -- linter/pointercheck/pointer_test.go | 2 +- linter/rightshift/rightshift.go | 76 ++++++++++++++++++++ linter/rightshift/rightshift_test.go | 36 ++++++++++ linter/structinit/structinit.go | 4 -- linter/structinit/structinit_test.go | 2 +- linter/testdata/src/rightshift/rightshift.go | 14 ++++ 10 files changed, 130 insertions(+), 15 deletions(-) create mode 100644 linter/rightshift/rightshift.go create mode 100644 linter/rightshift/rightshift_test.go create mode 100644 linter/testdata/src/rightshift/rightshift.go diff --git a/Makefile b/Makefile index 8b149bc0e1..956ab0c357 100644 --- a/Makefile +++ b/Makefile @@ -313,6 +313,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make go run ./linter/koanf ./... go run ./linter/pointercheck ./... + go run ./linter/rightshift ./... 
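+ # rightshift flags constant shifts written as `1 >> x` that almost always mean `1 << x`,
+ # the same mistake fixed in precompile_test.go by the previous commit.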
golangci-lint run --fix yarn --cwd contracts solhint @touch $@ diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go index d6780760e7..f09fdd3d05 100644 --- a/linter/koanf/koanf.go +++ b/linter/koanf/koanf.go @@ -18,10 +18,6 @@ var ( errIncorrectFlag = errors.New("mismatching flag initialization") ) -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - var Analyzer = &analysis.Analyzer{ Name: "koanfcheck", Doc: "check for koanf misconfigurations", diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go index 064ae533c4..0840ae5217 100644 --- a/linter/koanf/koanf_test.go +++ b/linter/koanf/koanf_test.go @@ -20,7 +20,7 @@ func testData(t *testing.T) string { t.Helper() wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } return filepath.Join(filepath.Dir(wd), "testdata") } diff --git a/linter/pointercheck/pointer.go b/linter/pointercheck/pointer.go index 6500b01222..4da2d8cc21 100644 --- a/linter/pointercheck/pointer.go +++ b/linter/pointercheck/pointer.go @@ -11,10 +11,6 @@ import ( "golang.org/x/tools/go/analysis/singlechecker" ) -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - var Analyzer = &analysis.Analyzer{ Name: "pointercheck", Doc: "check for pointer comparison", diff --git a/linter/pointercheck/pointer_test.go b/linter/pointercheck/pointer_test.go index 290e3826de..47d5c63014 100644 --- a/linter/pointercheck/pointer_test.go +++ b/linter/pointercheck/pointer_test.go @@ -11,7 +11,7 @@ import ( func TestAll(t *testing.T) { wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } testdata := filepath.Join(filepath.Dir(wd), "testdata") res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck") diff --git a/linter/rightshift/rightshift.go b/linter/rightshift/rightshift.go new file mode 100644 index 0000000000..f50d8a25ac --- /dev/null +++ b/linter/rightshift/rightshift.go @@ -0,0 +1,76 @@ +package main + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/singlechecker" +) + +var Analyzer = &analysis.Analyzer{ + Name: "rightshift", + Doc: "check for 1 >> x operation", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testrightshift", + Doc: "check for pointer comparison (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// rightShiftError indicates the position of pointer comparison. +type rightShiftError struct { + Pos token.Position + Message string +} + +// Result is returned from the checkStruct function, and holds all rightshift +// operations. +type Result struct { + Errors []rightShiftError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ret Result + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + be, ok := node.(*ast.BinaryExpr) + if !ok { + return true + } + // Check if the expression is '1 >> x'. 
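+ // token.SHR is the '>>' operator; only shifts whose left operand is the integer
+ // literal 1 are reported, so expressions like 'x >> 1' or '2 >> n' are left alone.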
+ if be.Op == token.SHR && isOne(be.X) { + err := rightShiftError{ + Pos: pass.Fset.Position(be.Pos()), + Message: "found rightshift ('1 >> x') expression, did you mean '1 << x' ?", + } + ret.Errors = append(ret.Errors, err) + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: err.Message, + Category: "pointercheck", + }) + } + } + return true + }, + ) + } + return ret, nil +} + +// isOne checks if the expression is a constant 1. +func isOne(expr ast.Expr) bool { + bl, ok := expr.(*ast.BasicLit) + return ok && bl.Kind == token.INT && bl.Value == "1" +} + +func main() { + singlechecker.Main(Analyzer) +} diff --git a/linter/rightshift/rightshift_test.go b/linter/rightshift/rightshift_test.go new file mode 100644 index 0000000000..41555c068f --- /dev/null +++ b/linter/rightshift/rightshift_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/go/analysis/analysistest" +) + +func TestAll(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get working directory: %v", err) + } + testdata := filepath.Join(filepath.Dir(wd), "testdata") + res := analysistest.Run(t, testdata, analyzerForTests, "rightshift") + want := []int{6, 11, 12} + got := erroLines(res) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("analysistest.Ru() unexpected diff in error lines:\n%s\n", diff) + } +} + +func erroLines(errs []*analysistest.Result) []int { + var ret []int + for _, e := range errs { + if r, ok := e.Result.(Result); ok { + for _, err := range r.Errors { + ret = append(ret, err.Pos.Line) + } + } + } + return ret +} diff --git a/linter/structinit/structinit.go b/linter/structinit/structinit.go index e4e65bc3fc..31baf1c90e 100644 --- a/linter/structinit/structinit.go +++ b/linter/structinit/structinit.go @@ -16,10 +16,6 @@ import ( // Note: comment should be directly line above the struct definition. const linterTip = "// lint:require-exhaustive-initialization" -func New(conf any) ([]*analysis.Analyzer, error) { - return []*analysis.Analyzer{Analyzer}, nil -} - // Analyzer implements struct analyzer for structs that are annotated with // `linterTip`, it checks that every instantiation initializes all the fields. 
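// For example, given a (hypothetical) annotated struct:
//   // lint:require-exhaustive-initialization
//   type Config struct{ A, B int }
// the analyzer reports Config{A: 1} and accepts Config{A: 1, B: 2}.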
var Analyzer = &analysis.Analyzer{ diff --git a/linter/structinit/structinit_test.go b/linter/structinit/structinit_test.go index db3676e185..df8588a58f 100644 --- a/linter/structinit/structinit_test.go +++ b/linter/structinit/structinit_test.go @@ -12,7 +12,7 @@ func testData(t *testing.T) string { t.Helper() wd, err := os.Getwd() if err != nil { - t.Fatalf("Failed to get wd: %s", err) + t.Fatalf("Failed to get working directory: %v", err) } return filepath.Join(filepath.Dir(wd), "testdata") } diff --git a/linter/testdata/src/rightshift/rightshift.go b/linter/testdata/src/rightshift/rightshift.go new file mode 100644 index 0000000000..3ad6d95980 --- /dev/null +++ b/linter/testdata/src/rightshift/rightshift.go @@ -0,0 +1,14 @@ +package rightshift + +import "fmt" + +func doThing(v int) int { + return 1 >> v // Error: Ln: 6 +} + +func calc() { + val := 10 + fmt.Printf("%v", 1>>val) // Error: Ln 11 + _ = doThing(1 >> val) // Error: Ln 12 + fmt.Printf("%v", 1<<val) // OK +} From: Maciej Kulawik Date: Mon, 29 Jan 2024 21:41:07 +0000 Subject: [PATCH 068/103] add state recovery option --- cmd/conf/init.go | 59 +++++++++--------- cmd/nitro/init.go | 8 +++ cmd/staterecovery/staterecovery.go | 83 +++++++++++++++++++++++++ execution/gethexec/blockchain.go | 1 + system_tests/staterecovery_test.go | 99 ++++++++++++++++++++++++++++++ 5 files changed, 222 insertions(+), 28 deletions(-) create mode 100644 cmd/staterecovery/staterecovery.go create mode 100644 system_tests/staterecovery_test.go diff --git a/cmd/conf/init.go b/cmd/conf/init.go index bebf1955b7..313e5bbee5 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -7,37 +7,39 @@ import ( ) type InitConfig struct { - Force bool `koanf:"force"` - Url string `koanf:"url"` - DownloadPath string `koanf:"download-path"` - DownloadPoll time.Duration `koanf:"download-poll"` - DevInit bool `koanf:"dev-init"` - DevInitAddress string `koanf:"dev-init-address"` - DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` - Empty bool `koanf:"empty"` - AccountsPerSync uint `koanf:"accounts-per-sync"` - ImportFile string `koanf:"import-file"` - ThenQuit bool `koanf:"then-quit"` - Prune string `koanf:"prune"` - PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMessage int64 `koanf:"reset-to-message"` + Force bool `koanf:"force"` + Url string `koanf:"url"` + DownloadPath string `koanf:"download-path"` + DownloadPoll time.Duration `koanf:"download-poll"` + DevInit bool `koanf:"dev-init"` + DevInitAddress string `koanf:"dev-init-address"` + DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` + Empty bool `koanf:"empty"` + AccountsPerSync uint `koanf:"accounts-per-sync"` + ImportFile string `koanf:"import-file"` + ThenQuit bool `koanf:"then-quit"` + Prune string `koanf:"prune"` + PruneBloomSize uint64 `koanf:"prune-bloom-size"` + ResetToMessage int64 `koanf:"reset-to-message"` + RecreateMissingState bool `koanf:"recreate-missing-state"` } var InitConfigDefault = InitConfig{ - Force: false, - Url: "", - DownloadPath: "/tmp/", - DownloadPoll: time.Minute, - DevInit: false, - DevInitAddress: "", - DevInitBlockNum: 0, - Empty: false, - ImportFile: "", - AccountsPerSync: 100000, - ThenQuit: false, - Prune: "", - PruneBloomSize: 2048, - ResetToMessage: -1, + Force: false, + Url: "", + DownloadPath: "/tmp/", + DownloadPoll: time.Minute, + DevInit: false, + DevInitAddress: "", + DevInitBlockNum: 0, + Empty: false, + ImportFile: "", + AccountsPerSync: 100000, + ThenQuit: false, + Prune: "", + PruneBloomSize: 2048, + ResetToMessage: -1, + RecreateMissingState: false, } func InitConfigAddOptions(prefix string,
f *pflag.FlagSet) { @@ -55,4 +57,5 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") + f.Bool(prefix+".recreate-missing-state", InitConfigDefault.RecreateMissingState, "if true: in case database exists and force=false, missing state will be recreated and committed to disk") } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 4cf5dcda06..bab15b6157 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -34,6 +34,7 @@ import ( "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/cmd/pruning" + "github.com/offchainlabs/nitro/cmd/staterecovery" "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/statetransfer" @@ -183,6 +184,13 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, l2BlockChain, err } + if config.Init.RecreateMissingState { + err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cachingConfig) + if err != nil { + return chainDb, l2BlockChain, err + } + } + return chainDb, l2BlockChain, nil } readOnlyDb.Close() diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go new file mode 100644 index 0000000000..0f2eba5c60 --- /dev/null +++ b/cmd/staterecovery/staterecovery.go @@ -0,0 +1,83 @@ +package staterecovery + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" +) + +func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheConfig *core.CacheConfig) error { + log.Info("Recreating missing states...") + start := time.Now() + current := bc.Genesis().NumberU64() + 1 + last := bc.CurrentBlock().Number.Uint64() + + previousBlock := bc.GetBlockByNumber(current - 1) + if previousBlock == nil { + return fmt.Errorf("genesis block is missing") + } + hashConfig := *hashdb.Defaults + hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit + trieConfig := &trie.Config{ + Preimages: false, + HashDB: &hashConfig, + } + database := state.NewDatabaseWithConfig(chainDb, trieConfig) + defer database.TrieDB().Close() + previousState, err := state.New(previousBlock.Root(), database, nil) + if err != nil { + return fmt.Errorf("genesis state is missing: %w", err) + } + database.TrieDB().Reference(previousBlock.Root(), common.Hash{}) + logged := time.Now() + recreated := 0 + for current <= last { + if time.Since(logged) > 1*time.Minute { + log.Info("Recreating missing states", "block", current, "target", last, "remaining", last-current, "elapsed", time.Since(start), "recreated", recreated) + logged = time.Now() + } + currentBlock := bc.GetBlockByNumber(current) + if currentBlock == nil { + return 
fmt.Errorf("missing block %d", current) + } + currentState, err := state.New(currentBlock.Root(), database, nil) + if err != nil { + _, _, _, err := bc.Processor().Process(currentBlock, previousState, vm.Config{}) + if err != nil { + return fmt.Errorf("processing block %d failed: %v", current, err) + } + root, err := previousState.Commit(current, bc.Config().IsEIP158(currentBlock.Number())) + if err != nil { + return fmt.Errorf("StateDB commit failed, number %d root %v: %w", current, currentBlock.Root().Hex(), err) + } + if root.Cmp(currentBlock.Root()) != 0 { + return fmt.Errorf("reached different state root after processing block %d, want %v, have %v", current, currentBlock.Root(), root) + } + // commit to disk + err = database.TrieDB().Commit(root, false) // TODO report = true, do we want this many logs? + if err != nil { + return fmt.Errorf("TrieDB commit failed, number %d root %v: %w", current, root, err) + } + currentState, err = state.New(currentBlock.Root(), database, nil) + if err != nil { + return fmt.Errorf("state reset after block %d failed: %v", current, err) + } + database.TrieDB().Reference(currentBlock.Root(), common.Hash{}) + database.TrieDB().Dereference(previousBlock.Root()) + recreated++ + } + current++ + previousState = currentState + previousBlock = currentBlock + } + log.Info("Finished recreating missing states", "elapsed", time.Since(start), "recreated", recreated) + return nil +} diff --git a/execution/gethexec/blockchain.go b/execution/gethexec/blockchain.go index a85224b635..2a20c3da26 100644 --- a/execution/gethexec/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -67,6 +67,7 @@ var DefaultCachingConfig = CachingConfig{ MaxAmountOfGasToSkipStateSaving: 0, } +// TODO remove stack from parameters as it is no longer needed here func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core.CacheConfig { baseConf := ethconfig.Defaults if cachingConfig.Archive { diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go new file mode 100644 index 0000000000..561e889153 --- /dev/null +++ b/system_tests/staterecovery_test.go @@ -0,0 +1,99 @@ +package arbtest + +import ( + "context" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/trie" + "github.com/offchainlabs/nitro/cmd/staterecovery" + "github.com/offchainlabs/nitro/execution/gethexec" +) + +func TestRectreateMissingStates(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 16 + _ = builder.Build(t) + l2cleanupDone := false + defer func() { + if !l2cleanupDone { + builder.L2.cleanup() + } + builder.L1.cleanup() + }() + builder.L2Info.GenerateAccount("User2") + var txs []*types.Transaction + for i := uint64(0); i < 200; i++ { + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + txs = append(txs, tx) + err := builder.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + } + for _, tx := range txs { + _, err := builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + } + lastBlock, err := builder.L2.Client.BlockNumber(ctx) + Require(t, err) + l2cleanupDone = true + builder.L2.cleanup() + t.Log("stopped l2 node") + func() { + stack, err := node.New(builder.l2StackConfig) + Require(t, err) + defer stack.Close() + chainDb, err := 
stack.OpenDatabase("chaindb", 0, 0, "", false) + Require(t, err) + defer chainDb.Close() + cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) + bc, err := gethexec.GetBlockChain(chainDb, cacheConfig, builder.chainConfig, builder.execConfig.TxLookupLimit) + Require(t, err) + err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig) + Require(t, err) + }() + + testClient, cleanup := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: builder.l2StackConfig}) + defer cleanup() + + currentBlock := uint64(0) + // wait for the chain to catch up + for currentBlock < lastBlock { + currentBlock, err = testClient.Client.BlockNumber(ctx) + Require(t, err) + time.Sleep(20 * time.Millisecond) + } + + currentBlock, err = testClient.Client.BlockNumber(ctx) + Require(t, err) + bc := testClient.ExecNode.Backend.ArbInterface().BlockChain() + triedb := bc.StateCache().TrieDB() + var start uint64 + if currentBlock+1 >= builder.execConfig.Caching.BlockCount { + start = currentBlock + 1 - builder.execConfig.Caching.BlockCount + } else { + start = 0 + } + for i := start; i <= currentBlock; i++ { + header := bc.GetHeaderByNumber(i) + _, err := bc.StateAt(header.Root) + Require(t, err) + tr, err := trie.New(trie.TrieID(header.Root), triedb) + Require(t, err) + it, err := tr.NodeIterator(nil) + Require(t, err) + for it.Next(true) { + } + Require(t, it.Error()) + } + + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil) + err = testClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = testClient.EnsureTxSucceeded(tx) + Require(t, err) +} From 333e46fa5513e3996856b11eae74be4a09e08dd0 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Mon, 29 Jan 2024 21:47:14 +0000 Subject: [PATCH 069/103] fix referencing --- cmd/nitro/init.go | 2 +- cmd/staterecovery/staterecovery.go | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index bab15b6157..c7d850ac77 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -185,7 +185,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo return chainDb, l2BlockChain, err } if config.Init.RecreateMissingState { - err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cachingConfig) + err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cacheConfig) if err != nil { return chainDb, l2BlockChain, err } diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 0f2eba5c60..a2918a81fe 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -36,7 +36,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon if err != nil { return fmt.Errorf("genesis state is missing: %w", err) } - database.TrieDB().Reference(previousBlock.Root(), common.Hash{}) + _ = database.TrieDB().Reference(previousBlock.Root(), common.Hash{}) logged := time.Now() recreated := 0 for current <= last { @@ -52,11 +52,11 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon if err != nil { _, _, _, err := bc.Processor().Process(currentBlock, previousState, vm.Config{}) if err != nil { - return fmt.Errorf("processing block %d failed: %v", current, err) + return fmt.Errorf("processing block %d failed: %w", current, err) } root, err := previousState.Commit(current, bc.Config().IsEIP158(currentBlock.Number())) if err != nil { - return fmt.Errorf("StateDB commit failed, number %d root 
%v: %w", current, currentBlock.Root().Hex(), err) + return fmt.Errorf("StateDB commit failed, number %d root %v: %w", current, currentBlock.Root(), err) } if root.Cmp(currentBlock.Root()) != 0 { return fmt.Errorf("reached different state root after processing block %d, want %v, have %v", current, currentBlock.Root(), root) @@ -68,16 +68,17 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon } currentState, err = state.New(currentBlock.Root(), database, nil) if err != nil { - return fmt.Errorf("state reset after block %d failed: %v", current, err) + return fmt.Errorf("state reset after block %d failed: %w", current, err) } - database.TrieDB().Reference(currentBlock.Root(), common.Hash{}) - database.TrieDB().Dereference(previousBlock.Root()) recreated++ } + _ = database.TrieDB().Reference(currentBlock.Root(), common.Hash{}) + _ = database.TrieDB().Dereference(previousBlock.Root()) current++ - previousState = currentState previousBlock = currentBlock + previousState = currentState } + _ = database.TrieDB().Dereference(previousBlock.Root()) log.Info("Finished recreating missing states", "elapsed", time.Since(start), "recreated", recreated) return nil } From 5a54d22a7e80023c347b3ebfdb9a80b28d67e934 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 29 Jan 2024 15:36:56 -0700 Subject: [PATCH 070/103] Prevent a 4844 header from being used inside Anytrust data --- arbstate/inbox.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index cf8f61e97a..fcb1c1ebcb 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -75,6 +75,9 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } payload := data[40:] + // Stage 1: Extract the payload from any data availability header. + // It's important that multiple DAS strategies can't both be invoked in the same batch, + // as these headers are validated by the sequencer inbox and not other DASs. if len(payload) > 0 && IsDASMessageHeaderByte(payload[0]) { if dasReader == nil { log.Error("No DAS Reader configured, but sequencer message found with DAS header") @@ -88,9 +91,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash return parsedMsg, nil } } - } - - if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { + } else if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { blobHashes := payload[1:] if len(blobHashes)%len(common.Hash{}) != 0 { return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") @@ -115,6 +116,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } } + // Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging). if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) if err != nil { @@ -124,6 +126,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash payload = pl } + // Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list. 
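+ // (Each stage consumes at most one leading header byte: stage 1 a DAS or blob-hashes
+ // header, which are now mutually exclusive, stage 2 an optional zeroheavy header, and
+ // stage 3 the brotli header.)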
if len(payload) > 0 && IsBrotliMessageHeaderByte(payload[0]) { decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen) if err == nil { From 254de23352ce8f03398e249a4452207a8ec8851b Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Tue, 30 Jan 2024 14:21:04 +0100 Subject: [PATCH 071/103] Revert running fuzzer in release-ci --- .github/workflows/release-ci.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 4981513a13..29a1732f15 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -37,9 +37,3 @@ jobs: run: | echo "Not a release candidate. Skipping workflow." exit 0 - - - name: Build nitro-fuzzer Docker Image - run: docker build --target nitro-fuzzer -t nitro-fuzzer . - - - name: Run Docker Container - run: docker run nitro-fuzzer From d920feb2cc11cfdd5cd9d03f74dd205b054e8d50 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Tue, 30 Jan 2024 16:59:38 +0100 Subject: [PATCH 072/103] Consolidate linters into single binary --- Makefile | 4 +--- {linter => linters}/koanf/handlers.go | 2 +- {linter => linters}/koanf/koanf.go | 7 +------ {linter => linters}/koanf/koanf_test.go | 2 +- linters/linters.go | 18 ++++++++++++++++++ .../pointercheck/pointercheck.go | 7 +------ .../pointercheck/pointercheck_test.go | 2 +- {linter => linters}/rightshift/rightshift.go | 7 +------ .../rightshift/rightshift_test.go | 2 +- {linter => linters}/structinit/structinit.go | 7 +------ .../structinit/structinit_test.go | 2 +- {linter => linters}/testdata/src/koanf/a/a.go | 0 {linter => linters}/testdata/src/koanf/b/b.go | 0 .../testdata/src/pointercheck/pointercheck.go | 0 .../testdata/src/rightshift/rightshift.go | 0 .../testdata/src/structinit/a/a.go | 0 16 files changed, 28 insertions(+), 32 deletions(-) rename {linter => linters}/koanf/handlers.go (99%) rename {linter => linters}/koanf/koanf.go (95%) rename {linter => linters}/koanf/koanf_test.go (99%) create mode 100644 linters/linters.go rename linter/pointercheck/pointer.go => linters/pointercheck/pointercheck.go (95%) rename linter/pointercheck/pointer_test.go => linters/pointercheck/pointercheck_test.go (96%) rename {linter => linters}/rightshift/rightshift.go (94%) rename {linter => linters}/rightshift/rightshift_test.go (97%) rename {linter => linters}/structinit/structinit.go (96%) rename {linter => linters}/structinit/structinit_test.go (97%) rename {linter => linters}/testdata/src/koanf/a/a.go (100%) rename {linter => linters}/testdata/src/koanf/b/b.go (100%) rename {linter => linters}/testdata/src/pointercheck/pointercheck.go (100%) rename {linter => linters}/testdata/src/rightshift/rightshift.go (100%) rename {linter => linters}/testdata/src/structinit/a/a.go (100%) diff --git a/Makefile b/Makefile index 956ab0c357..d03b940726 100644 --- a/Makefile +++ b/Makefile @@ -311,9 +311,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make - go run ./linter/koanf ./... - go run ./linter/pointercheck ./... - go run ./linter/rightshift ./... + go run ./linters ./... 
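+ # the linters binary registers the koanf, pointercheck, rightshift and structinit
+ # analyzers in one multichecker (see linters/linters.go below).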
golangci-lint run --fix yarn --cwd contracts solhint @touch $@ diff --git a/linter/koanf/handlers.go b/linters/koanf/handlers.go similarity index 99% rename from linter/koanf/handlers.go rename to linters/koanf/handlers.go index 5826004014..5ee3b80f9f 100644 --- a/linter/koanf/handlers.go +++ b/linters/koanf/handlers.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "fmt" diff --git a/linter/koanf/koanf.go b/linters/koanf/koanf.go similarity index 95% rename from linter/koanf/koanf.go rename to linters/koanf/koanf.go index f09fdd3d05..e53064b6b3 100644 --- a/linter/koanf/koanf.go +++ b/linters/koanf/koanf.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "errors" @@ -8,7 +8,6 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) var ( @@ -97,7 +96,3 @@ func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { } return ret, nil } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/koanf/koanf_test.go b/linters/koanf/koanf_test.go similarity index 99% rename from linter/koanf/koanf_test.go rename to linters/koanf/koanf_test.go index 0840ae5217..9029951dfa 100644 --- a/linter/koanf/koanf_test.go +++ b/linters/koanf/koanf_test.go @@ -1,4 +1,4 @@ -package main +package koanf import ( "errors" diff --git a/linters/linters.go b/linters/linters.go new file mode 100644 index 0000000000..a6c9f6d55e --- /dev/null +++ b/linters/linters.go @@ -0,0 +1,18 @@ +package main + +import ( + "github.com/offchainlabs/nitro/linters/koanf" + "github.com/offchainlabs/nitro/linters/pointercheck" + "github.com/offchainlabs/nitro/linters/rightshift" + "github.com/offchainlabs/nitro/linters/structinit" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + multichecker.Main( + koanf.Analyzer, + pointercheck.Analyzer, + rightshift.Analyzer, + structinit.Analyzer, + ) +} diff --git a/linter/pointercheck/pointer.go b/linters/pointercheck/pointercheck.go similarity index 95% rename from linter/pointercheck/pointer.go rename to linters/pointercheck/pointercheck.go index 4da2d8cc21..682ebd9357 100644 --- a/linter/pointercheck/pointer.go +++ b/linters/pointercheck/pointercheck.go @@ -1,4 +1,4 @@ -package main +package pointercheck import ( "fmt" @@ -8,7 +8,6 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) var Analyzer = &analysis.Analyzer{ @@ -90,7 +89,3 @@ func ptrIdent(pass *analysis.Pass, e ast.Expr) bool { } return false } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/pointercheck/pointer_test.go b/linters/pointercheck/pointercheck_test.go similarity index 96% rename from linter/pointercheck/pointer_test.go rename to linters/pointercheck/pointercheck_test.go index 47d5c63014..24f4534bca 100644 --- a/linter/pointercheck/pointer_test.go +++ b/linters/pointercheck/pointercheck_test.go @@ -1,4 +1,4 @@ -package main +package pointercheck import ( "os" diff --git a/linter/rightshift/rightshift.go b/linters/rightshift/rightshift.go similarity index 94% rename from linter/rightshift/rightshift.go rename to linters/rightshift/rightshift.go index f50d8a25ac..d6fcbfec6c 100644 --- a/linter/rightshift/rightshift.go +++ b/linters/rightshift/rightshift.go @@ -1,4 +1,4 @@ -package main +package rightshift import ( "go/ast" @@ -6,7 +6,6 @@ import ( "reflect" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) var Analyzer = &analysis.Analyzer{ @@ -70,7 +69,3 @@ func isOne(expr ast.Expr) bool { bl, ok := 
expr.(*ast.BasicLit) return ok && bl.Kind == token.INT && bl.Value == "1" } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/rightshift/rightshift_test.go b/linters/rightshift/rightshift_test.go similarity index 97% rename from linter/rightshift/rightshift_test.go rename to linters/rightshift/rightshift_test.go index 41555c068f..3640d79975 100644 --- a/linter/rightshift/rightshift_test.go +++ b/linters/rightshift/rightshift_test.go @@ -1,4 +1,4 @@ -package main +package rightshift import ( "os" diff --git a/linter/structinit/structinit.go b/linters/structinit/structinit.go similarity index 96% rename from linter/structinit/structinit.go rename to linters/structinit/structinit.go index 31baf1c90e..236b8747b2 100644 --- a/linter/structinit/structinit.go +++ b/linters/structinit/structinit.go @@ -1,4 +1,4 @@ -package main +package structinit import ( "fmt" @@ -8,7 +8,6 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/singlechecker" ) // Tip for linter that struct that has this comment should be included in the @@ -112,7 +111,3 @@ type position struct { fileName string line int } - -func main() { - singlechecker.Main(Analyzer) -} diff --git a/linter/structinit/structinit_test.go b/linters/structinit/structinit_test.go similarity index 97% rename from linter/structinit/structinit_test.go rename to linters/structinit/structinit_test.go index df8588a58f..57dfc2b000 100644 --- a/linter/structinit/structinit_test.go +++ b/linters/structinit/structinit_test.go @@ -1,4 +1,4 @@ -package main +package structinit import ( "os" diff --git a/linter/testdata/src/koanf/a/a.go b/linters/testdata/src/koanf/a/a.go similarity index 100% rename from linter/testdata/src/koanf/a/a.go rename to linters/testdata/src/koanf/a/a.go diff --git a/linter/testdata/src/koanf/b/b.go b/linters/testdata/src/koanf/b/b.go similarity index 100% rename from linter/testdata/src/koanf/b/b.go rename to linters/testdata/src/koanf/b/b.go diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linters/testdata/src/pointercheck/pointercheck.go similarity index 100% rename from linter/testdata/src/pointercheck/pointercheck.go rename to linters/testdata/src/pointercheck/pointercheck.go diff --git a/linter/testdata/src/rightshift/rightshift.go b/linters/testdata/src/rightshift/rightshift.go similarity index 100% rename from linter/testdata/src/rightshift/rightshift.go rename to linters/testdata/src/rightshift/rightshift.go diff --git a/linter/testdata/src/structinit/a/a.go b/linters/testdata/src/structinit/a/a.go similarity index 100% rename from linter/testdata/src/structinit/a/a.go rename to linters/testdata/src/structinit/a/a.go From 68d8534e96f7a5bb3c8a3b582df527602263dbcd Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Tue, 30 Jan 2024 17:13:55 +0100 Subject: [PATCH 073/103] Update Go CI workflow with consolidated linters binary --- .github/workflows/ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fbf00bcb2d..b27c196a6f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -117,8 +117,7 @@ jobs: skip-pkg-cache: true - name: Custom Lint run: | - go run ./linter/koanf ./... - go run ./linter/pointercheck ./... + go run ./linters ./... 
- name: Set environment variables run: | From c53c26ac50dfafb5402b2bafb532c85f10a9ecda Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 18:20:09 +0000 Subject: [PATCH 074/103] defer last dereference call --- cmd/staterecovery/staterecovery.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index a2918a81fe..43faa64053 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -37,6 +37,9 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon return fmt.Errorf("genesis state is missing: %w", err) } _ = database.TrieDB().Reference(previousBlock.Root(), common.Hash{}) + defer func() { + _ = database.TrieDB().Dereference(previousBlock.Root()) + }() logged := time.Now() recreated := 0 for current <= last { @@ -78,7 +81,6 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon previousBlock = currentBlock previousState = currentState } - _ = database.TrieDB().Dereference(previousBlock.Root()) log.Info("Finished recreating missing states", "elapsed", time.Since(start), "recreated", recreated) return nil } From 2cb13424eec12b336ed62c5213d75d2a63c5a9df Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 18:42:13 +0000 Subject: [PATCH 075/103] don't use referencing when recovering states --- cmd/staterecovery/staterecovery.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 43faa64053..f2feb5c7aa 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -4,7 +4,6 @@ import ( "fmt" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" @@ -36,10 +35,9 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon if err != nil { return fmt.Errorf("genesis state is missing: %w", err) } - _ = database.TrieDB().Reference(previousBlock.Root(), common.Hash{}) - defer func() { - _ = database.TrieDB().Dereference(previousBlock.Root()) - }() + // we don't need to reference states with `trie.Database.Reference` here, because: + // * either the state nodes will be read from disk and then cached in cleans cache + // * or they will be recreated, saved to disk and then also cached in cleans cache logged := time.Now() recreated := 0 for current <= last { @@ -75,8 +73,6 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon } recreated++ } - _ = database.TrieDB().Reference(currentBlock.Root(), common.Hash{}) - _ = database.TrieDB().Dereference(previousBlock.Root()) current++ previousBlock = currentBlock previousState = currentState From a594bc23bc2fa00053bb71cdedf984a1e11861d0 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 19:04:21 +0000 Subject: [PATCH 076/103] remove unsused assignment --- cmd/staterecovery/staterecovery.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index f2feb5c7aa..f21d213ba7 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -19,8 +19,8 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon current := bc.Genesis().NumberU64() + 1 last := bc.CurrentBlock().Number.Uint64() - 
previousBlock := bc.GetBlockByNumber(current - 1) - if previousBlock == nil { + genesisBlock := bc.GetBlockByNumber(current - 1) + if genesisBlock == nil { return fmt.Errorf("genesis block is missing") } hashConfig := *hashdb.Defaults @@ -31,7 +31,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon } database := state.NewDatabaseWithConfig(chainDb, trieConfig) defer database.TrieDB().Close() - previousState, err := state.New(previousBlock.Root(), database, nil) + previousState, err := state.New(genesisBlock.Root(), database, nil) if err != nil { return fmt.Errorf("genesis state is missing: %w", err) } @@ -60,7 +60,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon return fmt.Errorf("StateDB commit failed, number %d root %v: %w", current, currentBlock.Root(), err) } if root.Cmp(currentBlock.Root()) != 0 { - return fmt.Errorf("reached different state root after processing block %d, want %v, have %v", current, currentBlock.Root(), root) + return fmt.Errorf("reached different state root after processing block %d, have %v, want %v", current, root, currentBlock.Root()) } // commit to disk err = database.TrieDB().Commit(root, false) // TODO report = true, do we want this many logs? @@ -74,7 +74,6 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon recreated++ } current++ - previousBlock = currentBlock previousState = currentState } log.Info("Finished recreating missing states", "elapsed", time.Since(start), "recreated", recreated) From aa304a9e321bec9985d8d00b8248816e1e381b81 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 21:24:18 +0000 Subject: [PATCH 077/103] don't rely on BlockChain.CurrentBlock to get last available block --- cmd/nitro/init.go | 2 +- cmd/staterecovery/staterecovery.go | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index c7d850ac77..65c8962aa9 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -187,7 +187,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if config.Init.RecreateMissingState { err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cacheConfig) if err != nil { - return chainDb, l2BlockChain, err + return chainDb, l2BlockChain, fmt.Errorf("failed to recreate missing states: %w", err) } } diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index f21d213ba7..0d35d89b25 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -1,6 +1,7 @@ package staterecovery import ( + "errors" "fmt" "time" @@ -14,15 +15,18 @@ import ( ) func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheConfig *core.CacheConfig) error { - log.Info("Recreating missing states...") start := time.Now() current := bc.Genesis().NumberU64() + 1 - last := bc.CurrentBlock().Number.Uint64() - genesisBlock := bc.GetBlockByNumber(current - 1) if genesisBlock == nil { - return fmt.Errorf("genesis block is missing") + return errors.New("genesis block is missing") + } + // find last available block - we cannot rely on bc.CurrentBlock() + last := current + for bc.GetBlockByNumber(last) != nil { + last++ } + last-- hashConfig := *hashdb.Defaults hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit trieConfig := &trie.Config{ @@ -38,7 +42,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon // we don't need to reference states with 
`trie.Database.Reference` here, because: // * either the state nodes will be read from disk and then cached in cleans cache // * or they will be recreated, saved to disk and then also cached in cleans cache - logged := time.Now() + logged := time.Unix(0, 0) recreated := 0 for current <= last { if time.Since(logged) > 1*time.Minute { From 43a8aeb122f4ee87810d992f9d6be20a661a1a4a Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 21:32:30 +0000 Subject: [PATCH 078/103] improve recovery test --- system_tests/staterecovery_test.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 561e889153..33608bc8aa 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -72,13 +72,7 @@ func TestRectreateMissingStates(t *testing.T) { Require(t, err) bc := testClient.ExecNode.Backend.ArbInterface().BlockChain() triedb := bc.StateCache().TrieDB() - var start uint64 - if currentBlock+1 >= builder.execConfig.Caching.BlockCount { - start = currentBlock + 1 - builder.execConfig.Caching.BlockCount - } else { - start = 0 - } - for i := start; i <= currentBlock; i++ { + for i := uint64(0); i <= currentBlock; i++ { header := bc.GetHeaderByNumber(i) _, err := bc.StateAt(header.Root) Require(t, err) From d5989b5a8cc00a385f3f094a220e21968fd95620 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 21:49:25 +0000 Subject: [PATCH 079/103] add recreate-missing-state config validation --- cmd/conf/init.go | 8 ++++++++ cmd/nitro/nitro.go | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/cmd/conf/init.go b/cmd/conf/init.go index 313e5bbee5..73848ebcf2 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -3,6 +3,7 @@ package conf import ( "time" + "github.com/ethereum/go-ethereum/log" "github.com/spf13/pflag" ) @@ -59,3 +60,10 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. 
Also set max-reorg-resequence-depth=0 to force re-reading messages") f.Bool(prefix+".recreate-missing-state", InitConfigDefault.RecreateMissingState, "if true: in case database exists and force=false, missing state will be recreated and committed to disk") } + +func (c *InitConfig) Validate() error { + if c.Force && c.RecreateMissingState { + log.Warn("--init.force enabled, --init.recreate-missing-state will have no effect") + } + return nil +} diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 45f539488d..4838d981e3 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -781,6 +781,12 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { } func (c *NodeConfig) Validate() error { + if c.Init.RecreateMissingState && !c.Execution.Caching.Archive { + return errors.New("--init.recreate-missing-state enabled for a non-archive node") + } + if err := c.Init.Validate(); err != nil { + return err + } if err := c.ParentChain.Validate(); err != nil { return err } From 6f9d044f92b15e1bf0bf5a22466f3a25991a46dc Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Tue, 30 Jan 2024 21:52:20 +0000 Subject: [PATCH 080/103] update log messages --- cmd/conf/init.go | 2 +- cmd/nitro/nitro.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/conf/init.go b/cmd/conf/init.go index 73848ebcf2..b9617f30ac 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -63,7 +63,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { func (c *InitConfig) Validate() error { if c.Force && c.RecreateMissingState { - log.Warn("--init.force enabled, --init.recreate-missing-state will have no effect") + log.Warn("force init enabled, recreate-missing-state will have no effect") } return nil } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 4838d981e3..3932e0ed94 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -782,7 +782,7 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { func (c *NodeConfig) Validate() error { if c.Init.RecreateMissingState && !c.Execution.Caching.Archive { - return errors.New("--init.recreate-missing-state enabled for a non-archive node") + return errors.New("recreate-missing-state enabled for a non-archive node") } if err := c.Init.Validate(); err != nil { return err From 07c74c7d4cecd4e456e63bf87d71b59f55ae8ef1 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 31 Jan 2024 11:55:12 +0100 Subject: [PATCH 081/103] Change release workflow trigger condition --- .github/workflows/release-ci.yml | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 29a1732f15..0439b3f420 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -2,13 +2,9 @@ name: Release CI run-name: Release CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: - workflow_dispatch: - merge_group: - pull_request: - push: - branches: - - master - - develop + release: + types: [created, published] + jobs: build_and_run: runs-on: ubuntu-8 @@ -30,10 +26,3 @@ jobs: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ hashFiles('Dockerfile') }} restore-keys: ${{ runner.os }}-buildx- - - - - name: Check PR Label For Release label - if: ${{ !contains(github.event.*.labels.*.name, 'release') }} - run: | - echo "Not a release candidate. Skipping workflow." 
- exit 0 From 5969a112e883c0e9bd885cca15887a71536acc93 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Thu, 1 Feb 2024 13:54:59 +0100 Subject: [PATCH 082/103] Change trigger to workflow_dispatch only --- .github/workflows/release-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 0439b3f420..036bf46538 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -2,8 +2,7 @@ name: Release CI run-name: Release CI triggered from @${{ github.actor }} of ${{ github.head_ref }} on: - release: - types: [created, published] + workflow_dispatch: jobs: build_and_run: From f61ad77e1f0253f12059aec3e17ae7a7a10821b1 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 1 Feb 2024 20:23:42 +0000 Subject: [PATCH 083/103] allow setting start block for missing states recreation, fix tests --- cmd/conf/init.go | 66 +++++++++++++++--------------- cmd/nitro/init.go | 4 +- cmd/nitro/nitro.go | 4 +- cmd/staterecovery/staterecovery.go | 22 ++++++---- system_tests/Session.vim | 34 +++++++++++++++ system_tests/staterecovery_test.go | 4 +- 6 files changed, 87 insertions(+), 47 deletions(-) create mode 100644 system_tests/Session.vim diff --git a/cmd/conf/init.go b/cmd/conf/init.go index b9617f30ac..71709a8303 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -8,39 +8,39 @@ import ( ) type InitConfig struct { - Force bool `koanf:"force"` - Url string `koanf:"url"` - DownloadPath string `koanf:"download-path"` - DownloadPoll time.Duration `koanf:"download-poll"` - DevInit bool `koanf:"dev-init"` - DevInitAddress string `koanf:"dev-init-address"` - DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` - Empty bool `koanf:"empty"` - AccountsPerSync uint `koanf:"accounts-per-sync"` - ImportFile string `koanf:"import-file"` - ThenQuit bool `koanf:"then-quit"` - Prune string `koanf:"prune"` - PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMessage int64 `koanf:"reset-to-message"` - RecreateMissingState bool `koanf:"recreate-missing-state"` + Force bool `koanf:"force"` + Url string `koanf:"url"` + DownloadPath string `koanf:"download-path"` + DownloadPoll time.Duration `koanf:"download-poll"` + DevInit bool `koanf:"dev-init"` + DevInitAddress string `koanf:"dev-init-address"` + DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` + Empty bool `koanf:"empty"` + AccountsPerSync uint `koanf:"accounts-per-sync"` + ImportFile string `koanf:"import-file"` + ThenQuit bool `koanf:"then-quit"` + Prune string `koanf:"prune"` + PruneBloomSize uint64 `koanf:"prune-bloom-size"` + ResetToMessage int64 `koanf:"reset-to-message"` + RecreateMissingStateFrom uint64 `koanf:"recreate-missing-state-from"` } var InitConfigDefault = InitConfig{ - Force: false, - Url: "", - DownloadPath: "/tmp/", - DownloadPoll: time.Minute, - DevInit: false, - DevInitAddress: "", - DevInitBlockNum: 0, - Empty: false, - ImportFile: "", - AccountsPerSync: 100000, - ThenQuit: false, - Prune: "", - PruneBloomSize: 2048, - ResetToMessage: -1, - RecreateMissingState: false, + Force: false, + Url: "", + DownloadPath: "/tmp/", + DownloadPoll: time.Minute, + DevInit: false, + DevInitAddress: "", + DevInitBlockNum: 0, + Empty: false, + ImportFile: "", + AccountsPerSync: 100000, + ThenQuit: false, + Prune: "", + PruneBloomSize: 2048, + ResetToMessage: -1, + RecreateMissingStateFrom: 0, // 0 = disabled } func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -58,12 +58,12 @@ func InitConfigAddOptions(prefix string, f 
*pflag.FlagSet) { f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") - f.Bool(prefix+".recreate-missing-state", InitConfigDefault.RecreateMissingState, "if true: in case database exists and force=false, missing state will be recreated and committed to disk") + f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states form (0 = disabled)") } func (c *InitConfig) Validate() error { - if c.Force && c.RecreateMissingState { - log.Warn("force init enabled, recreate-missing-state will have no effect") + if c.Force && c.RecreateMissingStateFrom > 0 { + log.Warn("force init enabled, recreate-missing-state-from will have no effect") } return nil } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index 65c8962aa9..ebc57b13b8 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -184,8 +184,8 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, l2BlockChain, err } - if config.Init.RecreateMissingState { - err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cacheConfig) + if config.Init.RecreateMissingStateFrom > 0 { + err = staterecovery.RecreateMissingStates(chainDb, l2BlockChain, cacheConfig, config.Init.RecreateMissingStateFrom) if err != nil { return chainDb, l2BlockChain, fmt.Errorf("failed to recreate missing states: %w", err) } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 3932e0ed94..e5b97d4a1c 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -781,8 +781,8 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { } func (c *NodeConfig) Validate() error { - if c.Init.RecreateMissingState && !c.Execution.Caching.Archive { - return errors.New("recreate-missing-state enabled for a non-archive node") + if c.Init.RecreateMissingStateFrom > 0 && !c.Execution.Caching.Archive { + return errors.New("recreate-missing-state-from enabled for a non-archive node") } if err := c.Init.Validate(); err != nil { return err diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 0d35d89b25..4b5f9b846f 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -1,7 +1,6 @@ package staterecovery import ( - "errors" "fmt" "time" @@ -14,12 +13,17 @@ import ( "github.com/ethereum/go-ethereum/trie/triedb/hashdb" ) -func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheConfig *core.CacheConfig) error { +func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheConfig *core.CacheConfig, startBlock uint64) error { start := time.Now() - current := bc.Genesis().NumberU64() + 1 - genesisBlock := bc.GetBlockByNumber(current - 1) - if genesisBlock == nil { - return errors.New("genesis block is missing") + current := startBlock + genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum + if current < genesis+1 { + log.Warn("recreate-missing-states-from before genesis+1, starting from genesis+1") + current = genesis + 1 + } + previousBlock := bc.GetBlockByNumber(current - 1) + if 
previousBlock == nil { + return fmt.Errorf("start block parent is missing, parent block number: %d", current-1) } // find last available block - we cannot rely on bc.CurrentBlock() last := current @@ -35,9 +39,9 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon } database := state.NewDatabaseWithConfig(chainDb, trieConfig) defer database.TrieDB().Close() - previousState, err := state.New(genesisBlock.Root(), database, nil) + previousState, err := state.New(previousBlock.Root(), database, nil) if err != nil { - return fmt.Errorf("genesis state is missing: %w", err) + return fmt.Errorf("state of start block parent is missing: %w", err) } // we don't need to reference states with `trie.Database.Reference` here, because: // * either the state nodes will be read from disk and then cached in cleans cache @@ -67,7 +71,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon return fmt.Errorf("reached different state root after processing block %d, have %v, want %v", current, root, currentBlock.Root()) } // commit to disk - err = database.TrieDB().Commit(root, false) // TODO report = true, do we want this many logs? + err = database.TrieDB().Commit(root, false) if err != nil { return fmt.Errorf("TrieDB commit failed, number %d root %v: %w", current, root, err) } diff --git a/system_tests/Session.vim b/system_tests/Session.vim new file mode 100644 index 0000000000..fd61e42444 --- /dev/null +++ b/system_tests/Session.vim @@ -0,0 +1,34 @@ +let SessionLoad = 1 +let s:so_save = &g:so | let s:siso_save = &g:siso | setg so=0 siso=0 | setl so=-1 siso=-1 +let v:this_session=expand(":p") +silent only +silent tabonly +cd ~/repos/nitro3/system_tests +if expand('%') == '' && !&modified && line('$') <= 1 && getline(1) == '' + let s:wipebuf = bufnr('%') +endif +let s:shortmess_save = &shortmess +if &shortmess =~ 'A' + set shortmess=aoOA +else + set shortmess=aoO +endif +argglobal +%argdel +tabnext 1 +if exists('s:wipebuf') && len(win_findbuf(s:wipebuf)) == 0 && getbufvar(s:wipebuf, '&buftype') isnot# 'terminal' + silent exe 'bwipe ' . s:wipebuf +endif +unlet! s:wipebuf +set winheight=1 winwidth=20 +let &shortmess = s:shortmess_save +let s:sx = expand(":p:r")."x.vim" +if filereadable(s:sx) + exe "source " . 
fnameescape(s:sx) +endif +let &g:so = s:so_save | let &g:siso = s:siso_save +set hlsearch +nohlsearch +doautoall SessionLoadPost +unlet SessionLoad +" vim: set ft=vim : diff --git a/system_tests/staterecovery_test.go b/system_tests/staterecovery_test.go index 33608bc8aa..ac30038cc1 100644 --- a/system_tests/staterecovery_test.go +++ b/system_tests/staterecovery_test.go @@ -17,7 +17,9 @@ func TestRectreateMissingStates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.execConfig.Caching.Archive = true builder.execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving = 16 + builder.execConfig.Caching.SnapshotCache = 0 // disable snapshots _ = builder.Build(t) l2cleanupDone := false defer func() { @@ -53,7 +55,7 @@ func TestRectreateMissingStates(t *testing.T) { cacheConfig := gethexec.DefaultCacheConfigFor(stack, &gethexec.DefaultCachingConfig) bc, err := gethexec.GetBlockChain(chainDb, cacheConfig, builder.chainConfig, builder.execConfig.TxLookupLimit) Require(t, err) - err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig) + err = staterecovery.RecreateMissingStates(chainDb, bc, cacheConfig, 1) Require(t, err) }() From ea34814fbfebc1a6b3d2eaf29d49d8b03e8a2378 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 1 Feb 2024 20:28:03 +0000 Subject: [PATCH 084/103] remove Session.vim added by mistake --- system_tests/Session.vim | 34 ---------------------------------- 1 file changed, 34 deletions(-) delete mode 100644 system_tests/Session.vim diff --git a/system_tests/Session.vim b/system_tests/Session.vim deleted file mode 100644 index fd61e42444..0000000000 --- a/system_tests/Session.vim +++ /dev/null @@ -1,34 +0,0 @@ -let SessionLoad = 1 -let s:so_save = &g:so | let s:siso_save = &g:siso | setg so=0 siso=0 | setl so=-1 siso=-1 -let v:this_session=expand(":p") -silent only -silent tabonly -cd ~/repos/nitro3/system_tests -if expand('%') == '' && !&modified && line('$') <= 1 && getline(1) == '' - let s:wipebuf = bufnr('%') -endif -let s:shortmess_save = &shortmess -if &shortmess =~ 'A' - set shortmess=aoOA -else - set shortmess=aoO -endif -argglobal -%argdel -tabnext 1 -if exists('s:wipebuf') && len(win_findbuf(s:wipebuf)) == 0 && getbufvar(s:wipebuf, '&buftype') isnot# 'terminal' - silent exe 'bwipe ' . s:wipebuf -endif -unlet! s:wipebuf -set winheight=1 winwidth=20 -let &shortmess = s:shortmess_save -let s:sx = expand(":p:r")."x.vim" -if filereadable(s:sx) - exe "source " . fnameescape(s:sx) -endif -let &g:so = s:so_save | let &g:siso = s:siso_save -set hlsearch -nohlsearch -doautoall SessionLoadPost -unlet SessionLoad -" vim: set ft=vim : From b9a1c4945de373d41f16957b46692b0ca9ce0d64 Mon Sep 17 00:00:00 2001 From: Chris Buckland Date: Thu, 1 Feb 2024 20:36:37 +0000 Subject: [PATCH 085/103] Log error data where available --- util/rpcclient/rpcclient.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index dee6e9252a..d88e6dfc36 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -155,13 +155,25 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth ctx, cancelCtx = context.WithCancel(ctx_in) } err = c.client.CallContext(ctx, result, method, args...) 
+ cancelCtx() logger := log.Trace limit := int(c.config().ArgLogLimit) if err != nil && err.Error() != "already known" { logger = log.Info } - logger("rpc response", "method", method, "logId", logId, "err", err, "result", limitedMarshal{limit, result}, "attempt", i, "args", limitedArgumentsMarshal{limit, args}) + logEntry := []interface{}{ + "method", method, + "logId", logId, + "err", err, + "result", limitedMarshal{limit, result}, + "attempt", i, + "args", limitedArgumentsMarshal{limit, args}, + } + if da, ok := err.(rpc.DataError); ok { + logEntry = append(logEntry, "errorData", limitedMarshal{limit, da.ErrorData()}) + } + logger("rpc response", logEntry...) if err == nil { return nil } From e8167fee6804ba612ed0721b39d42a6b858094ed Mon Sep 17 00:00:00 2001 From: Chris Buckland Date: Thu, 1 Feb 2024 20:58:31 +0000 Subject: [PATCH 086/103] Use lint reco for errors.as --- util/rpcclient/rpcclient.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index d88e6dfc36..dbc145d490 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -170,8 +170,9 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth "attempt", i, "args", limitedArgumentsMarshal{limit, args}, } - if da, ok := err.(rpc.DataError); ok { - logEntry = append(logEntry, "errorData", limitedMarshal{limit, da.ErrorData()}) + var dataErr rpc.DataError + if errors.As(err, &dataErr) { + logEntry = append(logEntry, "errorData", limitedMarshal{limit, dataErr.ErrorData()}) } logger("rpc response", logEntry...) if err == nil { From 03138e0c783fb794afc0241b81f908869fd89cd7 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 1 Feb 2024 14:28:37 -0700 Subject: [PATCH 087/103] Pull in geth changes to parse ABI errors --- go-ethereum | 2 +- precompiles/precompile.go | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/go-ethereum b/go-ethereum index 1acd9c64ac..a0685a71f3 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 1acd9c64ac5804729475ef60aa578b4ec52fa0e6 +Subproject commit a0685a71f31c14f414c01cb5c7c91170fd0e84be diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 5d2ecce745..175bb21902 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -96,12 +96,8 @@ func RenderSolError(solErr abi.Error, data []byte) (string, error) { if err != nil { return "", err } - valsRange, ok := vals.([]interface{}) - if !ok { - return "", errors.New("unexpected unpack result") - } - strVals := make([]string, 0, len(valsRange)) - for _, val := range valsRange { + strVals := make([]string, 0, len(vals)) + for _, val := range vals { strVals = append(strVals, fmt.Sprintf("%v", val)) } return fmt.Sprintf("error %v(%v)", solErr.Name, strings.Join(strVals, ", ")), nil From 5bda2077f85595aa4f176330d922393edc0b9ebb Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 1 Feb 2024 22:27:58 +0000 Subject: [PATCH 088/103] fix typo --- cmd/conf/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/conf/init.go b/cmd/conf/init.go index 71709a8303..8a6c5096fb 100644 --- a/cmd/conf/init.go +++ b/cmd/conf/init.go @@ -58,7 +58,7 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of 
memory in megabytes to use for the pruning bloom filter (higher values prune better)") f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") - f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states form (0 = disabled)") + f.Uint64(prefix+".recreate-missing-state-from", InitConfigDefault.RecreateMissingStateFrom, "block number to start recreating missing states from (0 = disabled)") } func (c *InitConfig) Validate() error { From b94cf9c2455fd2c687493b32ccb9a124b007fd62 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 1 Feb 2024 22:49:37 +0000 Subject: [PATCH 089/103] address review comment --- cmd/staterecovery/staterecovery.go | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 4b5f9b846f..97c570e9b0 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -25,12 +25,6 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon if previousBlock == nil { return fmt.Errorf("start block parent is missing, parent block number: %d", current-1) } - // find last available block - we cannot rely on bc.CurrentBlock() - last := current - for bc.GetBlockByNumber(last) != nil { - last++ - } - last-- hashConfig := *hashdb.Defaults hashConfig.CleanCacheSize = cacheConfig.TrieCleanLimit trieConfig := &trie.Config{ @@ -48,14 +42,18 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon // * or they will be recreated, saved to disk and then also cached in cleans cache logged := time.Unix(0, 0) recreated := 0 - for current <= last { - if time.Since(logged) > 1*time.Minute { - log.Info("Recreating missing states", "block", current, "target", last, "remaining", last-current, "elapsed", time.Since(start), "recreated", recreated) - logged = time.Now() - } + for { currentBlock := bc.GetBlockByNumber(current) if currentBlock == nil { - return fmt.Errorf("missing block %d", current) + break + } + if time.Since(logged) > 1*time.Minute { + var target uint64 + if h := bc.CurrentBlock(); h != nil { + target = h.Number.Uint64() + } + log.Info("Recreating missing states", "block", current, "target", target, "remaining", target-current, "elapsed", time.Since(start), "recreated", recreated) + logged = time.Now() } currentState, err := state.New(currentBlock.Root(), database, nil) if err != nil { From b3f42cdd7e7bcb8ee064c19504caa07ee684bb58 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 1 Feb 2024 23:04:36 +0000 Subject: [PATCH 090/103] set target only once --- cmd/staterecovery/staterecovery.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index 97c570e9b0..bb67830354 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -15,6 +15,11 @@ import ( func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheConfig *core.CacheConfig, startBlock uint64) error { start := time.Now() + currentHeader := bc.CurrentBlock() + if currentHeader == nil { + return fmt.Errorf("current header is nil") + } + target := currentHeader.Number.Uint64() current := startBlock genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum if current < genesis+1 { @@ 
-48,11 +53,7 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon break } if time.Since(logged) > 1*time.Minute { - var target uint64 - if h := bc.CurrentBlock(); h != nil { - target = h.Number.Uint64() - } - log.Info("Recreating missing states", "block", current, "target", target, "remaining", target-current, "elapsed", time.Since(start), "recreated", recreated) + log.Info("Recreating missing states", "block", current, "target", target, "remaining", int64(target)-int64(current), "elapsed", time.Since(start), "recreated", recreated) logged = time.Now() } currentState, err := state.New(currentBlock.Root(), database, nil) From 74692529c3883e1f8341cede2f585da71323559a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 1 Feb 2024 16:39:46 -0700 Subject: [PATCH 091/103] Fix tests --- system_tests/precompile_test.go | 8 ++++++-- system_tests/retryable_test.go | 3 ++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index e0a9c2ce78..0ad0f8f1e4 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -5,6 +5,7 @@ package arbtest import ( "context" + "fmt" "math/big" "testing" @@ -67,7 +68,9 @@ func TestCustomSolidityErrors(t *testing.T) { Fatal(t, "customRevert call should have errored") } observedMessage := customError.Error() - expectedMessage := "execution reverted: error Custom(1024, This spider family wards off bugs: /\\oo/\\ //\\(oo)/\\ /\\oo/\\, true)" + expectedError := "Custom(1024, This spider family wards off bugs: /\\oo/\\ //\\(oo)/\\ /\\oo/\\, true)" + // The first error is server side. The second error is client side ABI decoding. + expectedMessage := fmt.Sprintf("execution reverted: error %v: %v", expectedError, expectedError) if observedMessage != expectedMessage { Fatal(t, observedMessage) } @@ -79,7 +82,8 @@ func TestCustomSolidityErrors(t *testing.T) { Fatal(t, "out of range ArbBlockHash call should have errored") } observedMessage = customError.Error() - expectedMessage = "execution reverted: error InvalidBlockNumber(1000000000, 1)" + expectedError = "InvalidBlockNumber(1000000000, 1)" + expectedMessage = fmt.Sprintf("execution reverted: error %v: %v", expectedError, expectedError) if observedMessage != expectedMessage { Fatal(t, observedMessage) } diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 4619671700..4e7bd2c7d8 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -121,7 +121,8 @@ func TestRetryableNoExist(t *testing.T) { arbRetryableTx, err := precompilesgen.NewArbRetryableTx(common.HexToAddress("6e"), builder.L2.Client) Require(t, err) _, err = arbRetryableTx.GetTimeout(&bind.CallOpts{}, common.Hash{}) - if err.Error() != "execution reverted: error NoTicketWithID()" { + // The first error is server side. The second error is client side ABI decoding. 
+ if err.Error() != "execution reverted: error NoTicketWithID(): NoTicketWithID()" { Fatal(t, "didn't get expected NoTicketWithID error") } } From f632620e486df8b5a9fcbf2d4501aab0696c1787 Mon Sep 17 00:00:00 2001 From: Maciej Kulawik Date: Thu, 1 Feb 2024 23:45:17 +0000 Subject: [PATCH 092/103] update override warning --- cmd/staterecovery/staterecovery.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/staterecovery/staterecovery.go b/cmd/staterecovery/staterecovery.go index bb67830354..6390826a91 100644 --- a/cmd/staterecovery/staterecovery.go +++ b/cmd/staterecovery/staterecovery.go @@ -23,8 +23,8 @@ func RecreateMissingStates(chainDb ethdb.Database, bc *core.BlockChain, cacheCon current := startBlock genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum if current < genesis+1 { - log.Warn("recreate-missing-states-from before genesis+1, starting from genesis+1") current = genesis + 1 + log.Warn("recreate-missing-states-from before genesis+1, starting from genesis+1", "configured", startBlock, "override", current) } previousBlock := bc.GetBlockByNumber(current - 1) if previousBlock == nil { From d36491aaf91f2589bfbb227cd6d2efdc230139df Mon Sep 17 00:00:00 2001 From: Jeremy Date: Fri, 2 Feb 2024 10:50:35 +0800 Subject: [PATCH 093/103] Update common_test.go --- system_tests/common_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index a950ebd7ca..be782c72fd 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -929,7 +929,7 @@ func Create2ndNodeWithConfig( currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(13)) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan, big.NewInt(1337)) Require(t, err) err = currentNode.Start(ctx) From f0156ccb615771393feb7b9ba69a24a093925c5a Mon Sep 17 00:00:00 2001 From: amsanghi Date: Fri, 2 Feb 2024 21:25:02 +0530 Subject: [PATCH 094/103] Add P2P options --- cmd/genericconf/server.go | 54 ++++++++++++++++++++++++++++++++++++++ cmd/nitro-val/config.go | 3 +++ cmd/nitro-val/nitro_val.go | 4 +-- cmd/nitro/nitro.go | 7 ++--- 4 files changed, 62 insertions(+), 6 deletions(-) diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 3da027ab27..f207e9ff3e 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -4,6 +4,8 @@ package genericconf import ( + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/enode" "time" flag "github.com/spf13/pflag" @@ -185,6 +187,58 @@ func AuthRPCConfigAddOptions(prefix string, f *flag.FlagSet) { f.StringSlice(prefix+".api", AuthRPCConfigDefault.API, "APIs offered over the AUTH-RPC interface") } +type P2PConfig struct { + ListenAddr string `koanf:"listen-addr"` + NoDial bool `koanf:"no-dial"` + NoDiscovery bool `koanf:"no-discovery"` + MaxPeers int `koanf:"max-peers"` + DiscoveryV5 bool `koanf:"discovery-v5"` + DiscoveryV4 bool `koanf:"discovery-v4"` + Bootnodes []string `koanf:"bootnodes"` + BootnodesV5 []string `koanf:"bootnodes-v5"` +} + +func (p P2PConfig) Apply(stackConf *node.Config) { + 
stackConf.P2P.ListenAddr = p.ListenAddr + stackConf.P2P.NoDial = p.NoDial + stackConf.P2P.NoDiscovery = p.NoDiscovery + stackConf.P2P.MaxPeers = p.MaxPeers + stackConf.P2P.DiscoveryV5 = p.DiscoveryV5 + stackConf.P2P.DiscoveryV4 = p.DiscoveryV4 + stackConf.P2P.BootstrapNodes = parseBootnodes(p.Bootnodes) + stackConf.P2P.BootstrapNodesV5 = parseBootnodes(p.BootnodesV5) +} + +func parseBootnodes(urls []string) []*enode.Node { + nodes := make([]*enode.Node, 0, len(urls)) + for _, url := range urls { + if url != "" { + node, err := enode.Parse(enode.ValidSchemes, url) + if err != nil { + log.Crit("Bootstrap URL invalid", "enode", url, "err", err) + return nil + } + nodes = append(nodes, node) + } + } + return nodes +} + +var P2PConfigDefault = P2PConfig{ + ListenAddr: "", + NoDial: true, + NoDiscovery: true, + MaxPeers: 50, + DiscoveryV5: false, + DiscoveryV4: false, +} + +func P2PConfigAddOptions(prefix string, f *flag.FlagSet) { + f.String(prefix+".listen-addr", P2PConfigDefault.ListenAddr, "P2P listen address") + f.Bool(prefix+".no-dial", P2PConfigDefault.NoDial, "P2P no dial") + f.Bool(prefix+".no-discovery", P2PConfigDefault.NoDiscovery, "P2P no discovery") +} + type MetricsServerConfig struct { Addr string `koanf:"addr"` Port int `koanf:"port"` diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index cf10787d6d..51d3978836 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -27,6 +27,7 @@ type ValidationNodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` + P2P genericconf.P2PConfig `koanf:"p2p"` Auth genericconf.AuthRPCConfig `koanf:"auth"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -66,6 +67,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ HTTP: HTTPConfigDefault, WS: WSConfigDefault, IPC: IPCConfigDefault, + P2P: genericconf.P2PConfigDefault, Auth: genericconf.AuthRPCConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -85,6 +87,7 @@ func ValidationNodeConfigAddOptions(f *flag.FlagSet) { genericconf.WSConfigAddOptions("ws", f) genericconf.IPCConfigAddOptions("ipc", f) genericconf.AuthRPCConfigAddOptions("auth", f) + genericconf.P2PConfigAddOptions("p2p", f) f.Bool("metrics", ValidationNodeConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) f.Bool("pprof", ValidationNodeConfigDefault.PProf, "enable pprof") diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 20b8b23628..fea95cbb15 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -70,9 +70,7 @@ func mainImpl() int { nodeConfig.WS.Apply(&stackConf) nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) - stackConf.P2P.ListenAddr = "" - stackConf.P2P.NoDial = true - stackConf.P2P.NoDiscovery = true + nodeConfig.P2P.Apply(&stackConf) vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = strippedRevision diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index e5b97d4a1c..f956b4674e 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -178,9 +178,7 @@ func mainImpl() int { if nodeConfig.WS.ExposeAll { stackConf.WSModules = append(stackConf.WSModules, "personal") } - stackConf.P2P.ListenAddr = "" - stackConf.P2P.NoDial = true - stackConf.P2P.NoDiscovery = true + nodeConfig.P2P.Apply(&stackConf) vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() 
stackConf.Version = strippedRevision @@ -674,6 +672,7 @@ type NodeConfig struct { IPC genericconf.IPCConfig `koanf:"ipc"` Auth genericconf.AuthRPCConfig `koanf:"auth"` GraphQL genericconf.GraphQLConfig `koanf:"graphql"` + P2P genericconf.P2PConfig `koanf:"p2p"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` PProf bool `koanf:"pprof"` @@ -698,6 +697,7 @@ var NodeConfigDefault = NodeConfig{ IPC: genericconf.IPCConfigDefault, Auth: genericconf.AuthRPCConfigDefault, GraphQL: genericconf.GraphQLConfigDefault, + P2P: genericconf.P2PConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, Init: conf.InitConfigDefault, @@ -721,6 +721,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { genericconf.WSConfigAddOptions("ws", f) genericconf.IPCConfigAddOptions("ipc", f) genericconf.AuthRPCConfigAddOptions("auth", f) + genericconf.P2PConfigAddOptions("p2p", f) genericconf.GraphQLConfigAddOptions("graphql", f) f.Bool("metrics", NodeConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) From 8eb64f241b9d969ffb81f43163adb8f06f32242c Mon Sep 17 00:00:00 2001 From: amsanghi Date: Fri, 2 Feb 2024 21:25:58 +0530 Subject: [PATCH 095/103] minor fix --- cmd/genericconf/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index f207e9ff3e..a8a47fae41 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -4,13 +4,13 @@ package genericconf import ( - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" "time" flag "github.com/spf13/pflag" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/enode" ) type HTTPConfig struct { From 2aa56569efc935cd5629ac9d1fa37557b1328447 Mon Sep 17 00:00:00 2001 From: amsanghi Date: Fri, 2 Feb 2024 21:27:06 +0530 Subject: [PATCH 096/103] minor fix --- cmd/genericconf/server.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index a8a47fae41..7550791d6d 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -231,12 +231,19 @@ var P2PConfigDefault = P2PConfig{ MaxPeers: 50, DiscoveryV5: false, DiscoveryV4: false, + Bootnodes: []string{}, + BootnodesV5: []string{}, } func P2PConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".listen-addr", P2PConfigDefault.ListenAddr, "P2P listen address") f.Bool(prefix+".no-dial", P2PConfigDefault.NoDial, "P2P no dial") f.Bool(prefix+".no-discovery", P2PConfigDefault.NoDiscovery, "P2P no discovery") + f.Int(prefix+".max-peers", P2PConfigDefault.MaxPeers, "P2P max peers") + f.Bool(prefix+".discovery-v5", P2PConfigDefault.DiscoveryV5, "P2P discovery v5") + f.Bool(prefix+".discovery-v4", P2PConfigDefault.DiscoveryV4, "P2P discovery v4") + f.StringSlice(prefix+".bootnodes", P2PConfigDefault.Bootnodes, "P2P bootnodes") + f.StringSlice(prefix+".bootnodes-v5", P2PConfigDefault.BootnodesV5, "P2P bootnodes v5") } type MetricsServerConfig struct { From acc85b37dd2796fc49de3f15a3fd30c4749850e3 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 2 Feb 2024 10:31:44 -0700 Subject: [PATCH 097/103] Gate GetScheduledUpgrade to ArbOS 20 --- precompiles/precompile.go | 1 + 1 file changed, 1 insertion(+) diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 5d2ecce745..5a16a1f903 100644 --- a/precompiles/precompile.go +++ 
b/precompiles/precompile.go @@ -560,6 +560,7 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = 11 ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 20 + ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = 20 ArbRetryableImpl := &ArbRetryableTx{Address: types.ArbRetryableTxAddress} ArbRetryable := insert(MakePrecompile(templates.ArbRetryableTxMetaData, ArbRetryableImpl)) From f16bc040a326b0a766044a23015cb7b7f2c20a9b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 2 Feb 2024 11:46:11 -0700 Subject: [PATCH 098/103] Test precompiles per ArbOS version --- precompiles/precompile_test.go | 48 ++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 16 deletions(-) diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index 02d962f0b4..975856bced 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -4,10 +4,13 @@ package precompiles import ( + "fmt" "math/big" + "os" "testing" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -176,22 +179,35 @@ func TestEventCosts(t *testing.T) { } } -type FatalBurner struct { - t *testing.T - count uint64 - gasLeft uint64 -} - -func NewFatalBurner(t *testing.T, limit uint64) FatalBurner { - return FatalBurner{t, 0, limit} -} +func TestPrecompilesPerArbosVersion(t *testing.T) { + // Set up a logger in case log.Crit is called by Precompiles() + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.LvlWarn) + log.Root().SetHandler(glogger) + + expectedNewMethodsPerArbosVersion := map[uint64]int{ + 0: 89, + 5: 3, + 10: 2, + 11: 4, + 20: 8, + } + + precompiles := Precompiles() + newMethodsPerArbosVersion := make(map[uint64]int) + for _, precompile := range precompiles { + for _, method := range precompile.Precompile().methods { + newMethodsPerArbosVersion[method.arbosVersion]++ + } + } -func (burner FatalBurner) Burn(amount uint64) error { - burner.t.Helper() - burner.count += 1 - if burner.gasLeft < amount { - Fail(burner.t, "out of gas after", burner.count, "burns") + if len(expectedNewMethodsPerArbosVersion) != len(newMethodsPerArbosVersion) { + t.Errorf("expected %v ArbOS versions with new precompile methods but got %v", len(expectedNewMethodsPerArbosVersion), len(newMethodsPerArbosVersion)) + } + for version, count := range newMethodsPerArbosVersion { + fmt.Printf("got %v version count %v\n", version, count) + if expectedNewMethodsPerArbosVersion[version] != count { + t.Errorf("expected %v new precompile methods for ArbOS version %v but got %v", expectedNewMethodsPerArbosVersion[version], version, count) + } } - burner.gasLeft -= amount - return nil } From ab4d316c0855908e45a0ac76633856963853152f Mon Sep 17 00:00:00 2001 From: amsanghi Date: Tue, 6 Feb 2024 21:17:29 +0530 Subject: [PATCH 099/103] Changes based on PR comments --- arbnode/sync_monitor.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 5fa9dbabcf..24b00a4afc 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -175,9 +175,13 @@ func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { 
latestValidatedGS := s.txStreamer.validator.GetLastValidated() - count, err := s.inboxReader.tracker.GetBatchMessageCount(latestValidatedGS.Batch - 1) - if err != nil { - return 0, err + var count arbutil.MessageIndex + var err error + if latestValidatedGS.Batch > 0 { + count, err = s.inboxReader.tracker.GetBatchMessageCount(latestValidatedGS.Batch - 1) + if err != nil { + return 0, err + } } count += arbutil.MessageIndex(latestValidatedGS.PosInBatch) return count, nil From 38fb7dc974c5020ecc6c6f613d074bfdd96cfddb Mon Sep 17 00:00:00 2001 From: amsanghi Date: Tue, 6 Feb 2024 22:21:49 +0530 Subject: [PATCH 100/103] Changes based on PR comments --- arbnode/sync_monitor.go | 13 +++---------- staker/block_validator.go | 8 ++++---- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index 24b00a4afc..99a66abde2 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -174,17 +174,10 @@ func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { } func (s *SyncMonitor) getLatestValidatedCount() (arbutil.MessageIndex, error) { - latestValidatedGS := s.txStreamer.validator.GetLastValidated() - var count arbutil.MessageIndex - var err error - if latestValidatedGS.Batch > 0 { - count, err = s.inboxReader.tracker.GetBatchMessageCount(latestValidatedGS.Batch - 1) - if err != nil { - return 0, err - } + if s.txStreamer.validator == nil { + return 0, errors.New("validator not set up") } - count += arbutil.MessageIndex(latestValidatedGS.PosInBatch) - return count, nil + return s.txStreamer.validator.GetValidated(), nil } func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) { diff --git a/staker/block_validator.go b/staker/block_validator.go index 4ab31c8526..03d216654a 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -1209,8 +1209,8 @@ func (v *BlockValidator) WaitForPos(t *testing.T, ctx context.Context, pos arbut } } -func (v *BlockValidator) GetLastValidated() validator.GoGlobalState { - v.reorgMutex.Lock() - defer v.reorgMutex.Unlock() - return v.lastValidGS +func (v *BlockValidator) GetValidated() arbutil.MessageIndex { + v.reorgMutex.RLock() + defer v.reorgMutex.RUnlock() + return v.validated() } From be921962b76b71e615b088b3aba2d3c6462384a7 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 6 Feb 2024 12:04:17 -0600 Subject: [PATCH 101/103] Bump go-ethereum submodule pin to latest master --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index a0685a71f3..36cc857932 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit a0685a71f31c14f414c01cb5c7c91170fd0e84be +Subproject commit 36cc85793228ad142923402b969fd489c02db2a5 From 3bce233ceed826f8ea9e1aed18bde34271721efd Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 6 Feb 2024 14:36:04 -0600 Subject: [PATCH 102/103] address PR comments --- arbnode/inbox_reader.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index f452b0d890..7b6b96d43b 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -40,11 +40,9 @@ func (c *InboxReaderConfig) Validate() error { if c.MaxBlocksToRead == 0 || c.MaxBlocksToRead < c.DefaultBlocksToRead { return errors.New("inbox reader max-blocks-to-read cannot be zero or less than default-blocks-to-read") } - if c.ReadMode != "latest" { - c.ReadMode = strings.ToLower(c.ReadMode) - if c.ReadMode != "safe" && 
c.ReadMode != "finalized" { - return fmt.Errorf("inbox reader read-mode is invalid, want: safe or finalized, got: %s", c.ReadMode) - } + c.ReadMode = strings.ToLower(c.ReadMode) + if c.ReadMode != "latest" && c.ReadMode != "safe" && c.ReadMode != "finalized" { + return fmt.Errorf("inbox reader read-mode is invalid, want: latest or safe or finalized, got: %s", c.ReadMode) } return nil } @@ -57,7 +55,7 @@ func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".default-blocks-to-read", DefaultInboxReaderConfig.DefaultBlocksToRead, "the default number of blocks to read at once (will vary based on traffic by default)") f.Uint64(prefix+".target-messages-read", DefaultInboxReaderConfig.TargetMessagesRead, "if adjust-blocks-to-read is enabled, the target number of messages to read at once") f.Uint64(prefix+".max-blocks-to-read", DefaultInboxReaderConfig.MaxBlocksToRead, "if adjust-blocks-to-read is enabled, the maximum number of blocks to read at once") - f.String(prefix+".read-mode", DefaultInboxReaderConfig.ReadMode, "mode to only read safe or finalized L1 blocks. Takes string input, valid strings- safe, finalized") + f.String(prefix+".read-mode", DefaultInboxReaderConfig.ReadMode, "mode to only read latest or safe or finalized L1 blocks. Enabling safe or finalized disables feed input and output. Defaults to latest. Takes string input, valid strings- latest, safe, finalized") } var DefaultInboxReaderConfig = InboxReaderConfig{ From 668393b744afc490a37f27fa8e78c4e79c9875ae Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 7 Feb 2024 19:57:59 -0600 Subject: [PATCH 103/103] Produce better error messages when failing to read the delayed inbox --- arbnode/delayed.go | 4 ++-- arbnode/inbox_tracker.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arbnode/delayed.go b/arbnode/delayed.go index 2a1745c540..c166aa2b90 100644 --- a/arbnode/delayed.go +++ b/arbnode/delayed.go @@ -221,10 +221,10 @@ func (b *DelayedBridge) logsToDeliveredMessages(ctx context.Context, logs []type msgKey := common.BigToHash(parsedLog.MessageIndex) data, ok := messageData[msgKey] if !ok { - return nil, errors.New("message not found") + return nil, fmt.Errorf("message %v data not found", parsedLog.MessageIndex) } if crypto.Keccak256Hash(data) != parsedLog.MessageDataHash { - return nil, errors.New("found message data with mismatched hash") + return nil, fmt.Errorf("found message %v data with mismatched hash", parsedLog.MessageIndex) } requestId := common.BigToHash(parsedLog.MessageIndex) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index eaf863bffc..763ddcc420 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -374,11 +374,11 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR } if seqNum != pos { - return errors.New("unexpected delayed sequence number") + return fmt.Errorf("unexpected delayed sequence number %v, expected %v", seqNum, pos) } if nextAcc != message.BeforeInboxAcc { - return errors.New("previous delayed accumulator mismatch") + return fmt.Errorf("previous delayed accumulator mismatch for message %v", seqNum) } nextAcc = message.AfterInboxAcc()