From b5adaedafed993caab25316f5daff1d3fa8afc0a Mon Sep 17 00:00:00 2001 From: tongjicoder Date: Fri, 14 Jun 2024 18:09:07 +0800 Subject: [PATCH 01/69] chore: make function comment match function name Signed-off-by: tongjicoder --- validator/server_arb/execution_run.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/server_arb/execution_run.go b/validator/server_arb/execution_run.go index 255d42ab16..4af5736ba6 100644 --- a/validator/server_arb/execution_run.go +++ b/validator/server_arb/execution_run.go @@ -19,7 +19,7 @@ type executionRun struct { close sync.Once } -// NewExecutionChallengeBackend creates a backend with the given arguments. +// NewExecutionRun creates a backend with the given arguments. // Note: machineCache may be nil, but if present, it must not have a restricted range. func NewExecutionRun( ctxIn context.Context, From 0922b631ef582e58e193f38132c060392201702e Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Mon, 24 Jun 2024 17:19:15 -0300 Subject: [PATCH 02/69] testhelpers: print stack trace in require --- util/testhelpers/testhelpers.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/util/testhelpers/testhelpers.go b/util/testhelpers/testhelpers.go index 071429879e..b1b08708e7 100644 --- a/util/testhelpers/testhelpers.go +++ b/util/testhelpers/testhelpers.go @@ -11,6 +11,7 @@ import ( "math/rand" "os" "regexp" + "runtime/debug" "sync" "testing" @@ -24,6 +25,7 @@ import ( func RequireImpl(t *testing.T, err error, printables ...interface{}) { t.Helper() if err != nil { + t.Log(string(debug.Stack())) t.Fatal(colors.Red, printables, err, colors.Clear) } } From c9e1a6710929fe602dd8ea49dacfefbc5cbb2f2b Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Thu, 20 Jun 2024 20:37:39 -0300 Subject: [PATCH 03/69] sys_tests: set data dir when creating node --- system_tests/common_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index de8e2242b5..9c19a913d9 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -212,13 +212,13 @@ func (b *NodeBuilder) Build(t *testing.T) func() { if b.withL1 { l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = - createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info) + createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info, b.dataDir) b.L1, b.L2 = l1, l2 b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } } else { l2 := NewTestClient(b.ctx) b.L2Info, l2.ConsensusNode, l2.Client = - createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.takeOwnership) + createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.takeOwnership, b.dataDir) b.L2 = l2 } b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) @@ -820,6 +820,7 @@ func createTestNodeWithL1( chainConfig *params.ChainConfig, stackConfig *node.Config, l2info_in info, + dataDir string, ) ( l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l2stack *node.Node, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, @@ -843,7 +844,7 @@ func createTestNodeWithL1( l2info = NewArbTestInfo(t, chainConfig.ChainID) } addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) - _, l2stack, l2chainDb, l2arbDb, 
l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &execConfig.Caching) + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, initMessage, stackConfig, &execConfig.Caching) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc if isSequencer { @@ -889,7 +890,7 @@ func createTestNodeWithL1( // L2 -Only. Enough for tests that needs no interface to L1 // Requires precompiles.AllowDebugPrecompiles = true func createTestNode( - t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, takeOwnership bool, + t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, takeOwnership bool, dataDir string, ) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { if nodeConfig == nil { nodeConfig = arbnode.ConfigDefaultL2Test() @@ -902,7 +903,7 @@ func createTestNode( AddDefaultValNode(t, ctx, nodeConfig, true, "") - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", chainConfig, &execConfig.Caching) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, dataDir, chainConfig, &execConfig.Caching) Require(t, execConfig.Validate()) execConfigFetcher := func() *gethexec.Config { return execConfig } From 22abce819457afa8dfbb4ad1fe636d7e3a7b6043 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Fri, 21 Jun 2024 10:57:07 -0300 Subject: [PATCH 04/69] sys_tests: set unique data dir per node The test failed because the builder was used to create multiple nodes with the same data directory. 
--- system_tests/seq_coordinator_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 43d55f40c9..1b8926a1b9 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -67,6 +67,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { createStartNode := func(nodeNum int) { builder.nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] builder.L2Info = l2Info + builder.dataDir = t.TempDir() // set new data dir for each node builder.Build(t) testNodes[nodeNum] = builder.L2 } From 262b6819b18452734cd0a9dbffec4d87aecf78d7 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Wed, 19 Jun 2024 18:19:12 -0300 Subject: [PATCH 05/69] sys_tests: use builder in blocks reexecutor test --- system_tests/blocks_reexecutor_test.go | 42 +++++++------------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/system_tests/blocks_reexecutor_test.go b/system_tests/blocks_reexecutor_test.go index 66690d1427..c6a7181c46 100644 --- a/system_tests/blocks_reexecutor_test.go +++ b/system_tests/blocks_reexecutor_test.go @@ -2,39 +2,29 @@ package arbtest import ( "context" - "math/big" "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" - "github.com/offchainlabs/nitro/execution/gethexec" ) func TestBlocksReExecutorModes(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - execConfig := gethexec.ConfigDefaultTest() - Require(t, execConfig.Validate()) - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + cleanup := builder.Build(t) + defer cleanup() - execConfigFetcher := func() *gethexec.Config { return execConfig } - execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) - Require(t, err) - - parentChainID := big.NewInt(1234) + l2info := builder.L2Info + client := builder.L2.Client + blockchain := builder.L2.ExecNode.Backend.ArbInterface().BlockChain() feedErrChan := make(chan error, 10) - node, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) - Require(t, err) - err = node.TxStreamer.AddFakeInitMessage() - Require(t, err) - Require(t, node.Start(ctx)) - client := ClientForStack(t, stack) l2info.GenerateAccount("User2") - for i := 0; i < 100; i++ { + genesis, err := client.BlockNumber(ctx) + Require(t, err) + for i := genesis; i < genesis+100; i++ { tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) err := client.SendTransaction(ctx, tx) Require(t, err) @@ -49,14 +39,9 @@ func TestBlocksReExecutorModes(t *testing.T) { success := make(chan struct{}) executorFull := blocksreexecutor.New(&blocksreexecutor.TestConfig, blockchain, feedErrChan) executorFull.Start(ctx, success) - select { case err := <-feedErrChan: - t.Errorf("error occurred: %v", err) - if node != nil { - node.StopAndWait() - } - t.FailNow() + t.Fatalf("error occurred: %v", err) case <-success: } @@ -66,14 +51,9 @@ func TestBlocksReExecutorModes(t *testing.T) { c.Mode = "random" executorRandom := blocksreexecutor.New(c, blockchain, feedErrChan) 
executorRandom.Start(ctx, success) - select { case err := <-feedErrChan: - t.Errorf("error occurred: %v", err) - if node != nil { - node.StopAndWait() - } - t.FailNow() + t.Fatalf("error occurred: %v", err) case <-success: } } From 98c46b2c52478b9ecde01312f26e197f8606dc93 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Thu, 20 Jun 2024 20:38:39 -0300 Subject: [PATCH 06/69] sys_tests: use builder in recreate state test --- system_tests/common_test.go | 32 +++++++++++++++++ system_tests/recreatestate_rpc_test.go | 50 ++++++++------------------ 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 9c19a913d9..d9cc7534bf 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -231,6 +231,38 @@ func (b *NodeBuilder) Build(t *testing.T) func() { } } +// L2 -Only. RestartL2Node shutdowns the existing l2 node and start it again using the same data dir. +func (b *NodeBuilder) RestartL2Node(t *testing.T) { + if b.L2 == nil { + t.Fatalf("L2 was not created") + } + b.L2.cleanup() + + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, b.L2Info, b.dataDir, b.chainConfig, &b.execConfig.Caching) + + execConfigFetcher := func() *gethexec.Config { return b.execConfig } + execNode, err := gethexec.CreateExecutionNode(b.ctx, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + feedErrChan := make(chan error, 10) + currentNode, err := arbnode.CreateNode(b.ctx, stack, execNode, arbDb, NewFetcherFromConfig(b.nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337), nil) + Require(t, err) + + Require(t, currentNode.Start(b.ctx)) + client := ClientForStack(t, stack) + + StartWatchChanErr(t, b.ctx, feedErrChan, currentNode) + + l2 := NewTestClient(b.ctx) + l2.ConsensusNode = currentNode + l2.Client = client + l2.ExecNode = execNode + l2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() } + + b.L2 = l2 + b.L2Info = l2info +} + func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*TestClient, func()) { if b.L2 == nil { t.Fatal("builder did not previously build a L2 Node") diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index bf321808de..de123aee78 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -20,7 +20,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util" ) @@ -330,7 +329,6 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ctx1, cancel1 := context.WithCancel(ctx) execConfig := gethexec.ConfigDefaultTest() execConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth execConfig.Sequencer.MaxBlockSpeed = 0 @@ -340,28 +338,19 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig skipBlocks := execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving skipGas := execConfig.Caching.MaxAmountOfGasToSkipStateSaving - feedErrChan := make(chan error, 10) - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &execConfig.Caching) - - Require(t, execConfig.Validate()) - execConfigFetcher := func() *gethexec.Config { return 
execConfig } - execNode, err := gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) - Require(t, err) + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.execConfig = execConfig + cleanup := builder.Build(t) + defer cleanup() - parentChainID := big.NewInt(1337) - node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, parentChainID, nil) - Require(t, err) - err = node.TxStreamer.AddFakeInitMessage() + client := builder.L2.Client + l2info := builder.L2Info + genesis, err := client.BlockNumber(ctx) Require(t, err) - Require(t, node.Start(ctx1)) - client := ClientForStack(t, stack) - - StartWatchChanErr(t, ctx, feedErrChan, node) - dataDir := node.Stack.DataDir() l2info.GenerateAccount("User2") var txs []*types.Transaction - for i := 0; i < txCount; i++ { + for i := genesis; i < uint64(txCount)+genesis; i++ { tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil) txs = append(txs, tx) err := client.SendTransaction(ctx, tx) @@ -372,8 +361,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig Fatal(t, "internal test error - tx got included in unexpected block number, have:", have, "want:", want) } } - bc := execNode.Backend.ArbInterface().BlockChain() - genesis := uint64(0) + bc := builder.L2.ExecNode.Backend.ArbInterface().BlockChain() currentHeader := bc.CurrentBlock() if currentHeader == nil { Fatal(t, "missing current block") @@ -385,24 +373,14 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig expectedBalance, err := client.BalanceAt(ctx, GetTestAddressForAccountName(t, "User2"), new(big.Int).SetUint64(lastBlock)) Require(t, err) - node.StopAndWait() - cancel1() - t.Log("stopped first node") - - l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + builder.RestartL2Node(t) + t.Log("restarted the node") - execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) - Require(t, err) - - node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan, parentChainID, nil) - Require(t, err) - Require(t, node.Start(ctx)) - client = ClientForStack(t, stack) - defer node.StopAndWait() - bc = execNode.Backend.ArbInterface().BlockChain() + client = builder.L2.Client + bc = builder.L2.ExecNode.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks - for i := genesis + 1; i <= genesis+uint64(txCount); i++ { + for i := genesis; i <= genesis+uint64(txCount); i++ { block := bc.GetBlockByNumber(i) if block == nil { Fatal(t, "header not found for block number:", i) From b08928dcc1877fda9e55235fd8fc899b6948d67a Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Fri, 28 Jun 2024 18:50:51 +0530 Subject: [PATCH 07/69] Skip TestSequencerPriceAdjustsFrom* on local builds --- .github/workflows/ci.yml | 2 +- system_tests/fees_test.go | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d97b5bfd5..c2ee39a6d1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -145,7 +145,7 @@ jobs: if: matrix.test-mode == 'defaults' run: | packages=`go list ./...` - stdbuf -oL gotestsum --format short-verbose 
--packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -timeout 20m -parallel=8 > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -timeout 20m -parallel=8 -tags=feestest > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") - name: run tests with race detection if: matrix.test-mode == 'race' diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 4d8fbf43fd..2a53e42a23 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -2,8 +2,10 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE // these tests seems to consume too much memory with race detection -//go:build !race -// +build !race +// Test randomly fails with L1 gas price estimate should tend toward the basefee +// so skipping locally, but running on CI +//go:build !race && feestest +// +build !race,feestest package arbtest From 1e2f2b0d77666b4d9ad9aca34675598f1523487a Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Mon, 24 Jun 2024 12:59:12 -0300 Subject: [PATCH 08/69] sys_tests: move create node functions to builder --- system_tests/common_test.go | 289 ++++++++++++++++++------------------ 1 file changed, 142 insertions(+), 147 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index d9cc7534bf..345195f011 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -164,6 +164,8 @@ type NodeBuilder struct { isSequencer bool takeOwnership bool withL1 bool + addresses *chaininfo.RollupAddresses + initMessage *arbostypes.ParsedInitMessage // Created nodes L1 *TestClient @@ -202,6 +204,30 @@ func (b *NodeBuilder) WithArbOSVersion(arbosVersion uint64) *NodeBuilder { } func (b *NodeBuilder) Build(t *testing.T) func() { + b.CheckConfig(t) + if b.withL1 { + b.BuildL1(t) + return b.BuildL2WithL1(t) + } + return b.BuildL2(t) +} + +func (b *NodeBuilder) CheckConfig(t *testing.T) { + if b.chainConfig == nil { + b.chainConfig = params.ArbitrumDevTestChainConfig() + } + if b.nodeConfig == nil { + b.nodeConfig = arbnode.ConfigDefaultL1Test() + } + if b.execConfig == nil { + b.execConfig = gethexec.ConfigDefaultTest() + } + if b.L1Info == nil { + b.L1Info = NewL1TestInfo(t) + } + if b.L2Info == nil { + b.L2Info = NewArbTestInfo(t, b.chainConfig.ChainID) + } if b.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { if b.execConfig.Caching.Archive { b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth @@ -209,18 +235,67 @@ func (b *NodeBuilder) Build(t *testing.T) func() { b.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth } } - if b.withL1 { - l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx) - b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack = - createTestNodeWithL1(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info, b.dataDir) - b.L1, b.L2 = l1, l2 - b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } +} + +func (b *NodeBuilder) BuildL1(t *testing.T) { + b.L1 = NewTestClient(b.ctx) + b.L1Info, b.L1.Client, b.L1.L1Backend, b.L1.Stack = createTestL1BlockChain(t, b.L1Info) + b.addresses, b.initMessage = DeployOnTestL1(t, b.ctx, b.L1Info, 
b.L1.Client, b.chainConfig) + b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } +} + +func (b *NodeBuilder) BuildL2WithL1(t *testing.T) func() { + if b.L1 == nil { + panic("must build L1 before building L2") + } + b.L2 = NewTestClient(b.ctx) + + var l2chainDb ethdb.Database + var l2arbDb ethdb.Database + var l2blockchain *core.BlockChain + _, b.L2.Stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig( + t, b.L2Info, b.dataDir, b.chainConfig, b.initMessage, b.l2StackConfig, &b.execConfig.Caching) + + var sequencerTxOptsPtr *bind.TransactOpts + var dataSigner signature.DataSignerFunc + if b.isSequencer { + sequencerTxOpts := b.L1Info.GetDefaultTransactOpts("Sequencer", b.ctx) + sequencerTxOptsPtr = &sequencerTxOpts + dataSigner = signature.DataSignerFromPrivateKey(b.L1Info.GetInfoWithPrivKey("Sequencer").PrivateKey) } else { - l2 := NewTestClient(b.ctx) - b.L2Info, l2.ConsensusNode, l2.Client = - createTestNode(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.chainConfig, b.takeOwnership, b.dataDir) - b.L2 = l2 + b.nodeConfig.BatchPoster.Enable = false + b.nodeConfig.Sequencer = false + b.nodeConfig.DelayedSequencer.Enable = false + b.execConfig.Sequencer.Enable = false + } + + var validatorTxOptsPtr *bind.TransactOpts + if b.nodeConfig.Staker.Enable { + validatorTxOpts := b.L1Info.GetDefaultTransactOpts("Validator", b.ctx) + validatorTxOptsPtr = &validatorTxOpts } + + AddDefaultValNode(t, b.ctx, b.nodeConfig, true, "") + + Require(t, b.execConfig.Validate()) + execConfig := b.execConfig + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(b.ctx, b.L2.Stack, l2chainDb, l2blockchain, b.L1.Client, execConfigFetcher) + Require(t, err) + + fatalErrChan := make(chan error, 10) + b.L2.ConsensusNode, err = arbnode.CreateNode( + b.ctx, b.L2.Stack, execNode, l2arbDb, NewFetcherFromConfig(b.nodeConfig), l2blockchain.Config(), b.L1.Client, + b.addresses, validatorTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil) + Require(t, err) + + err = b.L2.ConsensusNode.Start(b.ctx) + Require(t, err) + + b.L2.Client = ClientForStack(t, b.L2.Stack) + + StartWatchChanErr(t, b.ctx, fatalErrChan, b.L2.ConsensusNode) + b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) b.L2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() } return func() { @@ -231,6 +306,61 @@ func (b *NodeBuilder) Build(t *testing.T) func() { } } +// L2 -Only. 
Enough for tests that needs no interface to L1 +// Requires precompiles.AllowDebugPrecompiles = true +func (b *NodeBuilder) BuildL2(t *testing.T) func() { + b.L2 = NewTestClient(b.ctx) + + AddDefaultValNode(t, b.ctx, b.nodeConfig, true, "") + + var chainDb ethdb.Database + var arbDb ethdb.Database + var blockchain *core.BlockChain + b.L2Info, b.L2.Stack, chainDb, arbDb, blockchain = createL2BlockChain( + t, b.L2Info, b.dataDir, b.chainConfig, &b.execConfig.Caching) + + Require(t, b.execConfig.Validate()) + execConfig := b.execConfig + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(b.ctx, b.L2.Stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + fatalErrChan := make(chan error, 10) + b.L2.ConsensusNode, err = arbnode.CreateNode( + b.ctx, b.L2.Stack, execNode, arbDb, NewFetcherFromConfig(b.nodeConfig), blockchain.Config(), + nil, nil, nil, nil, nil, fatalErrChan, big.NewInt(1337), nil) + Require(t, err) + + // Give the node an init message + err = b.L2.ConsensusNode.TxStreamer.AddFakeInitMessage() + Require(t, err) + + err = b.L2.ConsensusNode.Start(b.ctx) + Require(t, err) + + b.L2.Client = ClientForStack(t, b.L2.Stack) + + if b.takeOwnership { + debugAuth := b.L2Info.GetDefaultTransactOpts("Owner", b.ctx) + + // make auth a chain owner + arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), b.L2.Client) + Require(t, err, "failed to deploy ArbDebug") + + tx, err := arbdebug.BecomeChainOwner(&debugAuth) + Require(t, err, "failed to deploy ArbDebug") + + _, err = EnsureTxSucceeded(b.ctx, b.L2.Client, tx) + Require(t, err) + } + + StartWatchChanErr(t, b.ctx, fatalErrChan, b.L2.ConsensusNode) + + b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode) + b.L2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() } + return func() { b.L2.cleanup() } +} + // L2 -Only. RestartL2Node shutdowns the existing l2 node and start it again using the same data dir. 
func (b *NodeBuilder) RestartL2Node(t *testing.T) { if b.L2 == nil { @@ -533,10 +663,6 @@ func (c *staticNodeConfigFetcher) Started() bool { return true } -func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, *eth.Ethereum, *node.Node) { - return createTestL1BlockChainWithConfig(t, l1info, nil) -} - func createStackConfigForTest(dataDir string) *node.Config { stackConf := node.DefaultConfig stackConf.DataDir = dataDir @@ -660,13 +786,11 @@ func AddDefaultValNode(t *testing.T, ctx context.Context, nodeConfig *arbnode.Co configByValidationNode(nodeConfig, valStack) } -func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *node.Config) (info, *ethclient.Client, *eth.Ethereum, *node.Node) { +func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client, *eth.Ethereum, *node.Node) { if l1info == nil { l1info = NewL1TestInfo(t) } - if stackConfig == nil { - stackConfig = createStackConfigForTest(t.TempDir()) - } + stackConfig := createStackConfigForTest(t.TempDir()) l1info.GenerateAccount("Faucet") chainConfig := params.ArbitrumDevTestChainConfig() @@ -842,135 +966,6 @@ func ClientForStack(t *testing.T, backend *node.Node) *ethclient.Client { return ethclient.NewClient(rpcClient) } -// Create and deploy L1 and arbnode for L2 -func createTestNodeWithL1( - t *testing.T, - ctx context.Context, - isSequencer bool, - nodeConfig *arbnode.Config, - execConfig *gethexec.Config, - chainConfig *params.ChainConfig, - stackConfig *node.Config, - l2info_in info, - dataDir string, -) ( - l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l2stack *node.Node, - l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, -) { - if nodeConfig == nil { - nodeConfig = arbnode.ConfigDefaultL1Test() - } - if execConfig == nil { - execConfig = gethexec.ConfigDefaultTest() - } - if chainConfig == nil { - chainConfig = params.ArbitrumDevTestChainConfig() - } - fatalErrChan := make(chan error, 10) - l1info, l1client, l1backend, l1stack = createTestL1BlockChain(t, nil) - var l2chainDb ethdb.Database - var l2arbDb ethdb.Database - var l2blockchain *core.BlockChain - l2info = l2info_in - if l2info == nil { - l2info = NewArbTestInfo(t, chainConfig.ChainID) - } - addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) - _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, initMessage, stackConfig, &execConfig.Caching) - var sequencerTxOptsPtr *bind.TransactOpts - var dataSigner signature.DataSignerFunc - if isSequencer { - sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) - sequencerTxOptsPtr = &sequencerTxOpts - dataSigner = signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) - } - - if !isSequencer { - nodeConfig.BatchPoster.Enable = false - nodeConfig.Sequencer = false - nodeConfig.DelayedSequencer.Enable = false - execConfig.Sequencer.Enable = false - } - - var validatorTxOptsPtr *bind.TransactOpts - if nodeConfig.Staker.Enable { - validatorTxOpts := l1info.GetDefaultTransactOpts("Validator", ctx) - validatorTxOptsPtr = &validatorTxOpts - } - - AddDefaultValNode(t, ctx, nodeConfig, true, "") - - Require(t, execConfig.Validate()) - execConfigFetcher := func() *gethexec.Config { return execConfig } - execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, execConfigFetcher) - Require(t, err) - currentNode, err = arbnode.CreateNode( - ctx, 
l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, - addresses, validatorTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, big.NewInt(1337), nil, - ) - Require(t, err) - - Require(t, currentNode.Start(ctx)) - - l2client = ClientForStack(t, l2stack) - - StartWatchChanErr(t, ctx, fatalErrChan, currentNode) - - return -} - -// L2 -Only. Enough for tests that needs no interface to L1 -// Requires precompiles.AllowDebugPrecompiles = true -func createTestNode( - t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, chainConfig *params.ChainConfig, takeOwnership bool, dataDir string, -) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { - if nodeConfig == nil { - nodeConfig = arbnode.ConfigDefaultL2Test() - } - if execConfig == nil { - execConfig = gethexec.ConfigDefaultTest() - } - - feedErrChan := make(chan error, 10) - - AddDefaultValNode(t, ctx, nodeConfig, true, "") - - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, dataDir, chainConfig, &execConfig.Caching) - - Require(t, execConfig.Validate()) - execConfigFetcher := func() *gethexec.Config { return execConfig } - execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) - Require(t, err) - - currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan, big.NewInt(1337), nil) - Require(t, err) - - // Give the node an init message - err = currentNode.TxStreamer.AddFakeInitMessage() - Require(t, err) - - Require(t, currentNode.Start(ctx)) - client := ClientForStack(t, stack) - - if takeOwnership { - debugAuth := l2info.GetDefaultTransactOpts("Owner", ctx) - - // make auth a chain owner - arbdebug, err := precompilesgen.NewArbDebug(common.HexToAddress("0xff"), client) - Require(t, err, "failed to deploy ArbDebug") - - tx, err := arbdebug.BecomeChainOwner(&debugAuth) - Require(t, err, "failed to deploy ArbDebug") - - _, err = EnsureTxSucceeded(ctx, client, tx) - Require(t, err) - } - - StartWatchChanErr(t, ctx, feedErrChan, currentNode) - - return l2info, currentNode, client -} - func StartWatchChanErr(t *testing.T, ctx context.Context, feedErrChan chan error, node *arbnode.Node) { go func() { select { From a80e3871a38a69eec724751cdf0dea3b06fc1f91 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Mon, 24 Jun 2024 15:30:10 -0300 Subject: [PATCH 09/69] sys_tests: use builder in node interface test --- system_tests/nodeinterface_test.go | 49 +++++++++++++----------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index b692af6e30..f1665e1d7c 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -15,8 +15,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" ) @@ -25,44 +23,39 @@ func TestFindBatch(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - l1Info := NewL1TestInfo(t) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + l1Info := builder.L1Info initialBalance := 
new(big.Int).Lsh(big.NewInt(1), 200) l1Info.GenerateGenesisAccount("deployer", initialBalance) l1Info.GenerateGenesisAccount("asserter", initialBalance) l1Info.GenerateGenesisAccount("challenger", initialBalance) l1Info.GenerateGenesisAccount("sequencer", initialBalance) - l1Info, l1Backend, _, _ := createTestL1BlockChain(t, l1Info) - conf := arbnode.ConfigDefaultL1Test() + conf := builder.nodeConfig conf.BlockValidator.Enable = false conf.BatchPoster.Enable = false - chainConfig := params.ArbitrumDevTestChainConfig() - fatalErrChan := make(chan error, 10) - rollupAddresses, initMsg := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) - - bridgeAddr, seqInbox, seqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + builder.BuildL1(t) - callOpts := bind.CallOpts{Context: ctx} + bridgeAddr, seqInbox, seqInboxAddr := setupSequencerInboxStub(ctx, t, builder.L1Info, builder.L1.Client, builder.chainConfig) + builder.addresses.Bridge = bridgeAddr + builder.addresses.SequencerInbox = seqInboxAddr - rollupAddresses.Bridge = bridgeAddr - rollupAddresses.SequencerInbox = seqInboxAddr - l2Info := NewArbTestInfo(t, chainConfig.ChainID) - consensus, _ := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, l2Info, rollupAddresses, initMsg, nil, nil, fatalErrChan) - err := consensus.Start(ctx) - Require(t, err) + cleanup := builder.BuildL2WithL1(t) + defer cleanup() - l2Client := ClientForStack(t, consensus.Stack) - nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2Client) + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) Require(t, err) - sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) + sequencerTxOpts := builder.L1Info.GetDefaultTransactOpts("sequencer", ctx) - l2Info.GenerateAccount("Destination") - makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) - makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) - makeBatch(t, consensus, l2Info, l1Backend, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + builder.L2Info.GenerateAccount("Destination") + const numBatches = 3 + for i := 0; i < numBatches; i++ { + makeBatch(t, builder.L2.ConsensusNode, builder.L2Info, builder.L1.Client, &sequencerTxOpts, seqInbox, seqInboxAddr, -1) + } for blockNum := uint64(0); blockNum < uint64(makeBatch_MsgsPerBatch)*3; blockNum++ { + callOpts := bind.CallOpts{Context: ctx} gotBatchNum, err := nodeInterface.FindBatchContainingBlock(&callOpts, blockNum) Require(t, err) expBatchNum := uint64(0) @@ -72,17 +65,17 @@ func TestFindBatch(t *testing.T) { if expBatchNum != gotBatchNum { Fatal(t, "wrong result from findBatchContainingBlock. 
blocknum ", blockNum, " expected ", expBatchNum, " got ", gotBatchNum) } - batchL1Block, err := consensus.InboxTracker.GetBatchParentChainBlock(gotBatchNum) + batchL1Block, err := builder.L2.ConsensusNode.InboxTracker.GetBatchParentChainBlock(gotBatchNum) Require(t, err) - blockHeader, err := l2Client.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) + blockHeader, err := builder.L2.Client.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) Require(t, err) blockHash := blockHeader.Hash() - minCurrentL1Block, err := l1Backend.BlockNumber(ctx) + minCurrentL1Block, err := builder.L1.Client.BlockNumber(ctx) Require(t, err) gotConfirmations, err := nodeInterface.GetL1Confirmations(&callOpts, blockHash) Require(t, err) - maxCurrentL1Block, err := l1Backend.BlockNumber(ctx) + maxCurrentL1Block, err := builder.L1.Client.BlockNumber(ctx) Require(t, err) if gotConfirmations > (maxCurrentL1Block-batchL1Block) || gotConfirmations < (minCurrentL1Block-batchL1Block) { From 156eb5ed6cbee192be5a22bc541599aed116fbb0 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Wed, 26 Jun 2024 12:18:29 -0300 Subject: [PATCH 10/69] sys_tests: use builder in DAS test --- system_tests/das_test.go | 167 +++++++++++++++------------------------ 1 file changed, 62 insertions(+), 105 deletions(-) diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 593eaa1bbe..42cc84c1be 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -20,20 +20,16 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" - "github.com/offchainlabs/nitro/cmd/conf" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" - "github.com/offchainlabs/nitro/util/signature" "golang.org/x/exp/slog" ) @@ -110,96 +106,68 @@ func TestDASRekey(t *testing.T) { defer cancel() // Setup L1 chain and contracts - chainConfig := params.ArbitrumDevTestDASChainConfig() - l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) - defer requireClose(t, l1stack) - feedErrChan := make(chan error, 10) - addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.BuildL1(t) // Setup DAS servers dasDataDir := t.TempDir() - nodeDir := t.TempDir() - dasRpcServerA, pubkeyA, backendConfigA, _, restServerUrlA := startLocalDASServer(t, ctx, dasDataDir, l1client, addresses.SequencerInbox) - l2info := NewArbTestInfo(t, chainConfig.ChainID) - l1NodeConfigA := arbnode.ConfigDefaultL1Test() + dasRpcServerA, pubkeyA, backendConfigA, _, restServerUrlA := startLocalDASServer(t, ctx, dasDataDir, builder.L1.Client, builder.addresses.SequencerInbox) l1NodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest() - sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) - sequencerTxOptsPtr := &sequencerTxOpts - parentChainID := big.NewInt(1337) { - authorizeDASKeyset(t, ctx, pubkeyA, l1info, l1client) - - // Setup L2 chain - _, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, l2info, nodeDir, 
chainConfig, initMessage, nil, nil) - l2info.GenerateAccount("User2") + authorizeDASKeyset(t, ctx, pubkeyA, builder.L1Info, builder.L1.Client) // Setup DAS config + builder.nodeConfig.DataAvailability.Enable = true + builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigA) + builder.nodeConfig.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + builder.nodeConfig.DataAvailability.RestAggregator.Enable = true + builder.nodeConfig.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} + builder.nodeConfig.DataAvailability.ParentChainNodeURL = "none" - l1NodeConfigA.DataAvailability.Enable = true - l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigA) - l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestAggregator.Enable = true - l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} - l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" - execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) - Require(t, err) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) - Require(t, err) - Require(t, nodeA.Start(ctx)) - l2clientA := ClientForStack(t, l2stackA) + // Setup L2 chain + builder.L2Info.GenerateAccount("User2") + builder.BuildL2WithL1(t) + // Setup second node l1NodeConfigB.BlockValidator.Enable = false l1NodeConfigB.DataAvailability.Enable = true l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig l1NodeConfigB.DataAvailability.RestAggregator.Enable = true l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} - l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" + nodeBParams := SecondNodeParams{ + nodeConfig: l1NodeConfigB, + initData: &builder.L2Info.ArbInitData, + } + l2B, cleanupB := builder.Build2ndNode(t, &nodeBParams) + checkBatchPosting(t, ctx, builder.L1.Client, builder.L2.Client, builder.L1Info, builder.L2Info, big.NewInt(1e12), l2B.Client) - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) - checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) - nodeA.StopAndWait() - nodeB.StopAndWait() + builder.L2.cleanup() + cleanupB() } err := dasRpcServerA.Shutdown(ctx) Require(t, err) - dasRpcServerB, pubkeyB, backendConfigB, _, _ := startLocalDASServer(t, ctx, dasDataDir, l1client, addresses.SequencerInbox) + dasRpcServerB, pubkeyB, backendConfigB, _, _ := startLocalDASServer(t, ctx, dasDataDir, builder.L1.Client, builder.addresses.SequencerInbox) defer func() { err = dasRpcServerB.Shutdown(ctx) Require(t, err) }() - authorizeDASKeyset(t, ctx, pubkeyB, l1info, l1client) + authorizeDASKeyset(t, ctx, pubkeyB, builder.L1Info, builder.L1.Client) // Restart the node on the new keyset against the new DAS server running on the same disk as the first with new keys - - stackConfig := createStackConfigForTest(nodeDir) - l2stackA, err := node.New(stackConfig) - Require(t, err) - - l2chainDb, err := l2stackA.OpenDatabaseWithExtraOptions("l2chaindata", 0, 0, "l2chaindata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("l2chaindata")) - Require(t, err) - - l2arbDb, err := 
l2stackA.OpenDatabaseWithExtraOptions("arbitrumdata", 0, 0, "arbitrumdata/", false, conf.PersistentConfigDefault.Pebble.ExtraOptions("arbitrumdata")) - Require(t, err) - - l2blockchain, err := gethexec.GetBlockChain(l2chainDb, nil, chainConfig, gethexec.ConfigDefaultTest().TxLookupLimit) - Require(t, err) - - execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) - Require(t, err) - - l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan, parentChainID, nil) - Require(t, err) - Require(t, nodeA.Start(ctx)) - l2clientA := ClientForStack(t, l2stackA) - - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) - checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(2e12), l2clientB) - - nodeA.StopAndWait() - nodeB.StopAndWait() + builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) + builder.l2StackConfig = createStackConfigForTest(builder.dataDir) + cleanup := builder.BuildL2WithL1(t) + defer cleanup() + + nodeBParams := SecondNodeParams{ + nodeConfig: l1NodeConfigB, + initData: &builder.L2Info.ArbInitData, + } + l2B, cleanup := builder.Build2ndNode(t, &nodeBParams) + defer cleanup() + checkBatchPosting(t, ctx, builder.L1.Client, builder.L2.Client, builder.L1Info, builder.L2Info, big.NewInt(2e12), l2B.Client) } func checkBatchPosting(t *testing.T, ctx context.Context, l1client, l2clientA *ethclient.Client, l1info, l2info info, expectedBalance *big.Int, l2ClientsToCheck ...*ethclient.Client) { @@ -240,16 +208,15 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { defer cancel() // Setup L1 chain and contracts - chainConfig := params.ArbitrumDevTestDASChainConfig() - l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) - defer requireClose(t, l1stack) - arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + builder.chainConfig = params.ArbitrumDevTestDASChainConfig() + builder.BuildL1(t) + + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, builder.L1.Client) + l1Reader, err := headerreader.New(ctx, builder.L1.Client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() - feedErrChan := make(chan error, 10) - addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) keyDir, fileDataDir, dbDataDir := t.TempDir(), t.TempDir(), t.TempDir() pubkey, _, err := das.GenerateAndStoreKeys(keyDir) @@ -278,7 +245,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { // L1NodeURL: normally we would have to set this but we are passing in the already constructed client and addresses to the factory } - daReader, daWriter, signatureVerifier, daHealthChecker, lifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig, l1Reader, &addresses.SequencerInbox) + daReader, daWriter, signatureVerifier, daHealthChecker, lifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig, l1Reader, &builder.addresses.SequencerInbox) Require(t, 
err) defer lifecycleManager.StopAndWaitUntil(time.Second) rpcLis, err := net.Listen("tcp", "localhost:0") @@ -288,13 +255,13 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { restLis, err := net.Listen("tcp", "localhost:0") Require(t, err) restServer, err := das.NewRestfulDasServerOnListener(restLis, genericconf.HTTPServerTimeoutConfigDefault, daReader, daHealthChecker) + Require(t, err) pubkeyA := pubkey - authorizeDASKeyset(t, ctx, pubkeyA, l1info, l1client) + authorizeDASKeyset(t, ctx, pubkeyA, builder.L1Info, builder.L1.Client) // - l1NodeConfigA := arbnode.ConfigDefaultL1Test() - l1NodeConfigA.DataAvailability = das.DataAvailabilityConfig{ + builder.nodeConfig.DataAvailability = das.DataAvailabilityConfig{ Enable: true, // AggregatorConfig set up below @@ -305,29 +272,17 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { PubKeyBase64Encoded: blsPubToBase64(pubkey), SignerMask: 1, } - l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) - l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestAggregator.Enable = true - l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" - - dataSigner := signature.DataSignerFromPrivateKey(l1info.Accounts["Sequencer"].PrivateKey) - - Require(t, err) + builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) + builder.nodeConfig.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + builder.nodeConfig.DataAvailability.RestAggregator.Enable = true + builder.nodeConfig.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + builder.nodeConfig.DataAvailability.ParentChainNodeURL = "none" // Setup L2 chain - l2info, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) - l2info.GenerateAccount("User2") - - execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) - Require(t, err) - - sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) - sequencerTxOptsPtr := &sequencerTxOpts - nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan, big.NewInt(1337), nil) - Require(t, err) - Require(t, nodeA.Start(ctx)) - l2clientA := ClientForStack(t, l2stackA) + builder.L2Info = NewArbTestInfo(t, builder.chainConfig.ChainID) + builder.L2Info.GenerateAccount("User2") + cleanup := builder.BuildL2WithL1(t) + defer cleanup() // Create node to sync from chain l1NodeConfigB := arbnode.ConfigDefaultL1NonSequencerTest() @@ -346,12 +301,14 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1NodeConfigB.DataAvailability.RestAggregator.Enable = true l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) - - checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) + nodeBParams := SecondNodeParams{ + nodeConfig: l1NodeConfigB, + initData: &builder.L2Info.ArbInitData, + } + l2B, cleanupB := builder.Build2ndNode(t, 
&nodeBParams) + defer cleanupB() - nodeA.StopAndWait() - nodeB.StopAndWait() + checkBatchPosting(t, ctx, builder.L1.Client, builder.L2.Client, builder.L1Info, builder.L2Info, big.NewInt(1e12), l2B.Client) err = restServer.Shutdown() Require(t, err) From 3e8e415c40e7f1c0301f5ac1f98ba7be71d6fef0 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Thu, 27 Jun 2024 12:14:59 -0300 Subject: [PATCH 11/69] sys_tests: add batch poster check Make sure that two batch posters aren't enabled for the same chain without having the batch poster redis coordinator enabled. --- system_tests/common_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 345195f011..d6b19bd9ca 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -424,6 +424,9 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth } } + if b.nodeConfig.BatchPoster.Enable && params.nodeConfig.BatchPoster.Enable && params.nodeConfig.BatchPoster.RedisUrl == "" { + t.Fatal("The batch poster must use Redis when enabled for multiple nodes") + } l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = From d6aa92642a79958fc1350744e6a209dc3ba94de8 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Fri, 28 Jun 2024 09:51:32 -0300 Subject: [PATCH 12/69] sys_tests: use builder in challenge tests --- system_tests/common_test.go | 11 ++++-- system_tests/full_challenge_impl_test.go | 50 +++++++++++------------- 2 files changed, 30 insertions(+), 31 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index d6b19bd9ca..34e165b38f 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -86,6 +86,7 @@ type SecondNodeParams struct { stackConfig *node.Config dasConfig *das.DataAvailabilityConfig initData *statetransfer.ArbosInitializationInfo + addresses *chaininfo.RollupAddresses } type TestClient struct { @@ -417,6 +418,9 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes if params.execConfig == nil { params.execConfig = b.execConfig } + if params.addresses == nil { + params.addresses = b.addresses + } if params.execConfig.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { if params.execConfig.Caching.Archive { params.execConfig.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth @@ -430,7 +434,7 @@ func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*Tes l2 := NewTestClient(b.ctx) l2.Client, l2.ConsensusNode = - Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig) + Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig, params.addresses, b.initMessage) l2.ExecNode = getExecNode(t, l2.ConsensusNode) l2.cleanup = func() { l2.ConsensusNode.StopAndWait() } return l2, func() { l2.cleanup() } @@ -1003,6 +1007,8 @@ func Create2ndNodeWithConfig( nodeConfig *arbnode.Config, execConfig *gethexec.Config, stackConfig *node.Config, + addresses *chaininfo.RollupAddresses, + initMessage *arbostypes.ParsedInitMessage, ) (*ethclient.Client, *arbnode.Node) { if nodeConfig == nil { nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() @@ -1036,7 +1042,6 @@ func Create2ndNodeWithConfig( 
firstExec := getExecNode(t, first) chainConfig := firstExec.ArbInterface.BlockChain().Config() - initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo) coreCacheConfig := gethexec.DefaultCacheConfigFor(l2stack, &execConfig.Caching) l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0) @@ -1050,7 +1055,7 @@ func Create2ndNodeWithConfig( currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) Require(t, err) - currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, &validatorTxOpts, &sequencerTxOpts, dataSigner, feedErrChan, big.NewInt(1337), nil) Require(t, err) err = currentNode.Start(ctx) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index e4c49c3227..6274c7e825 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -25,17 +25,13 @@ import ( "github.com/offchainlabs/nitro/arbcompress" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" - "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/cmd/chaininfo" - "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" - "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" @@ -238,16 +234,6 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha return bridgeAddr, seqInbox, seqInboxAddr } -func createL2Nodes(t *testing.T, ctx context.Context, conf *arbnode.Config, chainConfig *params.ChainConfig, l1Client arbutil.L1Interface, l2info *BlockchainTestInfo, rollupAddresses *chaininfo.RollupAddresses, initMsg *arbostypes.ParsedInitMessage, txOpts *bind.TransactOpts, signer signature.DataSignerFunc, fatalErrChan chan error) (*arbnode.Node, *gethexec.ExecutionNode) { - _, stack, l2ChainDb, l2ArbDb, l2Blockchain := createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMsg, nil, nil) - execNode, err := gethexec.CreateExecutionNode(ctx, stack, l2ChainDb, l2Blockchain, l1Client, gethexec.ConfigDefaultTest) - Require(t, err) - consensusNode, err := arbnode.CreateNode(ctx, stack, execNode, l2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Client, rollupAddresses, txOpts, txOpts, signer, fatalErrChan, big.NewInt(1337), nil) - Require(t, err) - - return consensusNode, execNode -} - func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, challengeMsgIdx int64) { glogger := log.NewGlogHandler( log.NewTerminalHandler(io.Writer(os.Stderr), false)) @@ -257,16 +243,16 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) initialBalance := new(big.Int).Lsh(big.NewInt(1), 200) - l1Info := NewL1TestInfo(t) + l1Info := builder.L1Info l1Info.GenerateGenesisAccount("deployer", initialBalance) l1Info.GenerateGenesisAccount("asserter", initialBalance) l1Info.GenerateGenesisAccount("challenger", initialBalance) l1Info.GenerateGenesisAccount("sequencer", initialBalance) - chainConfig := params.ArbitrumDevTestChainConfig() - l1Info, l1Backend, _, _ := createTestL1BlockChain(t, l1Info) - conf := arbnode.ConfigDefaultL1Test() + chainConfig := builder.chainConfig + conf := builder.nodeConfig conf.BlockValidator.Enable = false conf.BatchPoster.Enable = false conf.InboxReader.CheckDelay = time.Second @@ -280,8 +266,8 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall } configByValidationNode(conf, valStack) - fatalErrChan := make(chan error, 10) - asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) + builder.BuildL1(t) + l1Backend := builder.L1.Client deployerTxOpts := l1Info.GetDefaultTransactOpts("deployer", ctx) sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) @@ -291,20 +277,28 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterBridgeAddr, asserterSeqInbox, asserterSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) challengerBridgeAddr, challengerSeqInbox, challengerSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + asserterRollupAddresses := builder.addresses asserterRollupAddresses.Bridge = asserterBridgeAddr asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr - asserterL2Info := NewArbTestInfo(t, chainConfig.ChainID) - asserterL2, asserterExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, asserterL2Info, asserterRollupAddresses, initMessage, nil, nil, fatalErrChan) - err := asserterL2.Start(ctx) - Require(t, err) - challengerRollupAddresses := *asserterRollupAddresses + cleanup := builder.BuildL2WithL1(t) + defer cleanup() + asserterL2 := builder.L2.ConsensusNode + asserterL2Info := builder.L2Info + asserterExec := builder.L2.ExecNode + + challengerRollupAddresses := *builder.addresses challengerRollupAddresses.Bridge = challengerBridgeAddr challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr challengerL2Info := NewArbTestInfo(t, chainConfig.ChainID) - challengerL2, challengerExec := createL2Nodes(t, ctx, conf, chainConfig, l1Backend, challengerL2Info, &challengerRollupAddresses, initMessage, nil, nil, fatalErrChan) - err = challengerL2.Start(ctx) - Require(t, err) + challengerParams := SecondNodeParams{ + addresses: &challengerRollupAddresses, + initData: &challengerL2Info.ArbInitData, + } + challenger, challengerCleanup := builder.Build2ndNode(t, &challengerParams) + defer challengerCleanup() + challengerL2 := challenger.ConsensusNode + challengerExec := challenger.ExecNode asserterL2Info.GenerateAccount("Destination") challengerL2Info.SetFullAccountInfo("Destination", asserterL2Info.GetInfoWithPrivKey("Destination")) From 234415e220ef712a9daf4dd60c649debb24c4db9 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Fri, 28 Jun 2024 11:31:02 -0300 Subject: [PATCH 13/69] sys_tests: disable batch poster in second node --- system_tests/snap_sync_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/system_tests/snap_sync_test.go 
b/system_tests/snap_sync_test.go index dd22bb027c..31d5ea8492 100644 --- a/system_tests/snap_sync_test.go +++ b/system_tests/snap_sync_test.go @@ -39,7 +39,13 @@ func TestSnapSync(t *testing.T) { // This node will be stopped in middle and arbitrumdata will be deleted. testDir := t.TempDir() nodeBStack := createStackConfigForTest(testDir) - nodeB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{stackConfig: nodeBStack}) + nodeBConfig := builder.nodeConfig + nodeBConfig.BatchPoster.Enable = false + nodeBParams := &SecondNodeParams{ + stackConfig: nodeBStack, + nodeConfig: nodeBConfig, + } + nodeB, cleanupB := builder.Build2ndNode(t, nodeBParams) builder.BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000))) From 49fe0bcfa1f643aac2e4550bfdaec08b171b626f Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 1 Jul 2024 18:10:31 +0530 Subject: [PATCH 14/69] Change atomic.Store to atomic.Type --- arbnode/batch_poster.go | 8 ++--- arbnode/inbox_reader.go | 16 ++++----- arbnode/redislock/redis.go | 10 +++--- arbnode/seq_coordinator.go | 12 +++---- arbnode/seq_coordinator_atomic_test.go | 18 +++++----- arbnode/simple_redis_lock_test.go | 10 +++--- arbnode/transaction_streamer.go | 20 +++++------ arbos/programs/native_api.go | 4 +-- broadcastclient/broadcastclient.go | 6 ++-- broadcastclients/broadcastclients.go | 4 +-- das/reader_aggregator_strategies.go | 4 +-- execution/gethexec/sequencer.go | 11 +++--- staker/block_validator.go | 42 +++++++++++------------ system_tests/seq_nonce_test.go | 10 +++--- system_tests/seq_reject_test.go | 10 +++--- system_tests/test_info.go | 6 ++-- util/containers/promise.go | 6 ++-- util/containers/promise_test.go | 12 +++---- util/rpcclient/rpcclient.go | 4 +-- util/rpcclient/rpcclient_test.go | 13 ++++--- validator/client/redis/producer.go | 15 ++++---- validator/client/validation_client.go | 10 +++--- validator/server_arb/machine.go | 4 +-- validator/server_arb/validator_spawner.go | 6 ++-- validator/server_jit/spawner.go | 6 ++-- wsbroadcastserver/clientconnection.go | 11 +++--- wsbroadcastserver/clientmanager.go | 8 ++--- 27 files changed, 145 insertions(+), 141 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 1829ae29f5..9993323543 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -112,7 +112,7 @@ type BatchPoster struct { // This is an atomic variable that should only be accessed atomically. // An estimate of the number of batches we want to post but haven't yet. // This doesn't include batches which we don't want to post yet due to the L1 bounds. - backlog uint64 + backlog atomic.Uint64 lastHitL1Bounds time.Time // The last time we wanted to post a message but hit the L1 bounds batchReverted atomic.Bool // indicates whether data poster batch was reverted @@ -1083,7 +1083,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if config.IgnoreBlobPrice { use4844 = true } else { - backlog := atomic.LoadUint64(&b.backlog) + backlog := b.backlog.Load() // Logic to prevent switching from non-4844 batches to 4844 batches too often, // so that blocks can be filled efficiently. The geth txpool rejects txs for // accounts that already have the other type of txs in the pool with @@ -1411,7 +1411,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // Setting the backlog to 0 here ensures that we don't lower compression as a result. 
backlog = 0 } - atomic.StoreUint64(&b.backlog, backlog) + b.backlog.Store(backlog) b.building = nil // If we aren't queueing up transactions, wait for the receipt before moving on to the next batch. @@ -1427,7 +1427,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } func (b *BatchPoster) GetBacklogEstimate() uint64 { - return atomic.LoadUint64(&b.backlog) + return b.backlog.Load() } func (b *BatchPoster) Start(ctxIn context.Context) { diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 3ba9aa78f3..78b4db1929 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -97,8 +97,8 @@ type InboxReader struct { l1Reader *headerreader.HeaderReader // Atomic - lastSeenBatchCount uint64 - lastReadBatchCount uint64 + lastSeenBatchCount atomic.Uint64 + lastReadBatchCount atomic.Uint64 } func NewInboxReader(tracker *InboxTracker, client arbutil.L1Interface, l1Reader *headerreader.HeaderReader, firstMessageBlock *big.Int, delayedBridge *DelayedBridge, sequencerInbox *SequencerInbox, config InboxReaderConfigFetcher) (*InboxReader, error) { @@ -240,7 +240,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { seenBatchCountStored := uint64(math.MaxUint64) storeSeenBatchCount := func() { if seenBatchCountStored != seenBatchCount { - atomic.StoreUint64(&r.lastSeenBatchCount, seenBatchCount) + r.lastSeenBatchCount.Store(seenBatchCount) seenBatchCountStored = seenBatchCount } } @@ -394,7 +394,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { // There's nothing to do from = arbmath.BigAddByUint(currentHeight, 1) blocksToFetch = config.DefaultBlocksToRead - atomic.StoreUint64(&r.lastReadBatchCount, checkingBatchCount) + r.lastReadBatchCount.Store(checkingBatchCount) storeSeenBatchCount() if !r.caughtUp && readMode == "latest" { r.caughtUp = true @@ -526,7 +526,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if len(sequencerBatches) > 0 { readAnyBatches = true - atomic.StoreUint64(&r.lastReadBatchCount, sequencerBatches[len(sequencerBatches)-1].SequenceNumber+1) + r.lastReadBatchCount.Store(sequencerBatches[len(sequencerBatches)-1].SequenceNumber + 1) storeSeenBatchCount() } } @@ -553,7 +553,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } if !readAnyBatches { - atomic.StoreUint64(&r.lastReadBatchCount, checkingBatchCount) + r.lastReadBatchCount.Store(checkingBatchCount) storeSeenBatchCount() } } @@ -625,7 +625,7 @@ func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint6 } func (r *InboxReader) GetLastReadBatchCount() uint64 { - return atomic.LoadUint64(&r.lastReadBatchCount) + return r.lastReadBatchCount.Load() } // GetLastSeenBatchCount returns how many sequencer batches the inbox reader has read in from L1. 
@@ -633,7 +633,7 @@ func (r *InboxReader) GetLastReadBatchCount() uint64 { // >0 - last batchcount seen in run() - only written after lastReadBatchCount updated // 0 - no batch seen, error func (r *InboxReader) GetLastSeenBatchCount() uint64 { - return atomic.LoadUint64(&r.lastSeenBatchCount) + return r.lastSeenBatchCount.Load() } func (r *InboxReader) GetDelayBlocks() uint64 { diff --git a/arbnode/redislock/redis.go b/arbnode/redislock/redis.go index 09296e3c79..7e26010cae 100644 --- a/arbnode/redislock/redis.go +++ b/arbnode/redislock/redis.go @@ -21,7 +21,7 @@ type Simple struct { stopwaiter.StopWaiter client redis.UniversalClient config SimpleCfgFetcher - lockedUntil int64 + lockedUntil atomic.Int64 mutex sync.Mutex stopping bool readyToLock func() bool @@ -239,12 +239,12 @@ func execTestPipe(pipe redis.Pipeliner, ctx context.Context) error { } // notice: It is possible for two consecutive reads to get decreasing values. That shouldn't matter. -func atomicTimeRead(addr *int64) time.Time { - asint64 := atomic.LoadInt64(addr) +func atomicTimeRead(addr *atomic.Int64) time.Time { + asint64 := addr.Load() return time.UnixMilli(asint64) } -func atomicTimeWrite(addr *int64, t time.Time) { +func atomicTimeWrite(addr *atomic.Int64, t time.Time) { asint64 := t.UnixMilli() - atomic.StoreInt64(addr, asint64) + addr.Store(asint64) } diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index cdf1011b11..98c19ce361 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -48,7 +48,7 @@ type SeqCoordinator struct { prevChosenSequencer string reportedWantsLockout bool - lockoutUntil int64 // atomic + lockoutUntil atomic.Int64 // atomic wantsLockoutMutex sync.Mutex // manages access to acquireLockoutAndWriteMessage and generally the wants lockout key avoidLockout int // If > 0, prevents acquiring the lockout but not extending the lockout if no alternative sequencer wants the lockout. Protected by chosenUpdateMutex. @@ -191,14 +191,14 @@ func StandaloneSeqCoordinatorInvalidateMsgIndex(ctx context.Context, redisClient return nil } -func atomicTimeWrite(addr *int64, t time.Time) { +func atomicTimeWrite(addr *atomic.Int64, t time.Time) { asint64 := t.UnixMilli() - atomic.StoreInt64(addr, asint64) + addr.Store(asint64) } // notice: It is possible for two consecutive reads to get decreasing values. That shouldn't matter. 
-func atomicTimeRead(addr *int64) time.Time { - asint64 := atomic.LoadInt64(addr) +func atomicTimeRead(addr *atomic.Int64) time.Time { + asint64 := addr.Load() return time.UnixMilli(asint64) } @@ -692,7 +692,7 @@ func (c *SeqCoordinator) DebugPrint() string { return fmt.Sprint("Url:", c.config.Url(), " prevChosenSequencer:", c.prevChosenSequencer, " reportedWantsLockout:", c.reportedWantsLockout, - " lockoutUntil:", c.lockoutUntil, + " lockoutUntil:", c.lockoutUntil.Load(), " redisErrors:", c.redisErrors) } diff --git a/arbnode/seq_coordinator_atomic_test.go b/arbnode/seq_coordinator_atomic_test.go index 61468a3adb..9b9d9dea81 100644 --- a/arbnode/seq_coordinator_atomic_test.go +++ b/arbnode/seq_coordinator_atomic_test.go @@ -21,21 +21,21 @@ import ( const messagesPerRound = 20 type CoordinatorTestData struct { - messageCount uint64 + messageCount atomic.Uint64 sequencer []string err error mutex sync.Mutex waitForCoords sync.WaitGroup - testStartRound int32 + testStartRound atomic.Int32 } func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *CoordinatorTestData) { nextRound := int32(0) for { sequenced := make([]bool, messagesPerRound) - for atomic.LoadInt32(&data.testStartRound) < nextRound { + for data.testStartRound.Load() < nextRound { if ctx.Err() != nil { return } @@ -44,7 +44,7 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo nextRound++ var execError error for { - messageCount := atomic.LoadUint64(&data.messageCount) + messageCount := data.messageCount.Load() if messageCount >= messagesPerRound { break } @@ -53,7 +53,7 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo err := coord.acquireLockoutAndWriteMessage(ctx, asIndex, asIndex+1, &arbostypes.EmptyTestMessageWithMetadata) if err == nil { sequenced[messageCount] = true - atomic.StoreUint64(&data.messageCount, messageCount+1) + data.messageCount.Store(messageCount + 1) randNr := rand.Intn(20) if randNr > 15 { execError = coord.chosenOneRelease(ctx) @@ -105,9 +105,9 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { coordConfig.Signer.Symmetric.Dangerous.DisableSignatureVerification = true coordConfig.Signer.Symmetric.SigningKey = "" testData := CoordinatorTestData{ - testStartRound: -1, - sequencer: make([]string, messagesPerRound), + sequencer: make([]string, messagesPerRound), } + testData.testStartRound.Store(-1) nullSigner, err := signature.NewSignVerify(&coordConfig.Signer, nil, nil) Require(t, err) @@ -134,12 +134,12 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { for round := int32(0); round < 10; round++ { redisClient.Del(ctx, redisutil.CHOSENSEQ_KEY, redisutil.MSG_COUNT_KEY) - testData.messageCount = 0 + testData.messageCount.Store(0) for i := 0; i < messagesPerRound; i++ { testData.sequencer[i] = "" } testData.waitForCoords.Add(NumOfThreads) - atomic.StoreInt32(&testData.testStartRound, round) + testData.testStartRound.Store(round) testData.waitForCoords.Wait() Require(t, testData.err) seqList := "" diff --git a/arbnode/simple_redis_lock_test.go b/arbnode/simple_redis_lock_test.go index c9dd576749..25805de0de 100644 --- a/arbnode/simple_redis_lock_test.go +++ b/arbnode/simple_redis_lock_test.go @@ -21,11 +21,11 @@ const test_release_frac = 5 const test_delay = time.Millisecond const test_redisKey_prefix = "__TEMP_SimpleRedisLockTest__" -func attemptLock(ctx context.Context, s *redislock.Simple, flag *int32, wg *sync.WaitGroup) { +func attemptLock(ctx context.Context, s *redislock.Simple, flag *atomic.Int32, wg 
*sync.WaitGroup) { defer wg.Done() for i := 0; i < test_attempts; i++ { if s.AttemptLock(ctx) { - atomic.AddInt32(flag, 1) + flag.Add(1) } else if rand.Intn(test_release_frac) == 0 { s.Release(ctx) } @@ -76,7 +76,7 @@ func simpleRedisLockTest(t *testing.T, redisKeySuffix string, chosen int, backgo <-time.After(time.Second) } wg := sync.WaitGroup{} - counters := make([]int32, test_threads) + counters := make([]atomic.Int32, test_threads) for i, lock := range locks { wg.Add(1) go attemptLock(ctx, lock, &counters[i], &wg) @@ -84,8 +84,8 @@ func simpleRedisLockTest(t *testing.T, redisKeySuffix string, chosen int, backgo wg.Wait() successful := -1 for i, counter := range counters { - if counter != 0 { - if counter != test_attempts { + if counter.Load() != 0 { + if counter.Load() != test_attempts { t.Fatalf("counter %d value %d", i, counter) } if successful > 0 { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 5c02129ee6..22ecdd553c 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -62,7 +62,7 @@ type TransactionStreamer struct { nextAllowedFeedReorgLog time.Time broadcasterQueuedMessages []arbostypes.MessageWithMetadataAndBlockHash - broadcasterQueuedMessagesPos uint64 + broadcasterQueuedMessagesPos atomic.Uint64 broadcasterQueuedMessagesActiveReorg bool coordinator *SeqCoordinator @@ -491,14 +491,14 @@ func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreC } func (s *TransactionStreamer) FeedPendingMessageCount() arbutil.MessageIndex { - pos := atomic.LoadUint64(&s.broadcasterQueuedMessagesPos) + pos := s.broadcasterQueuedMessagesPos.Load() if pos == 0 { return 0 } s.insertionMutex.Lock() defer s.insertionMutex.Unlock() - pos = atomic.LoadUint64(&s.broadcasterQueuedMessagesPos) + pos = s.broadcasterQueuedMessagesPos.Load() if pos == 0 { return 0 } @@ -552,14 +552,14 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe if len(s.broadcasterQueuedMessages) == 0 || (feedReorg && !s.broadcasterQueuedMessagesActiveReorg) { // Empty cache or feed different from database, save current feed messages until confirmed L1 messages catch up. 
s.broadcasterQueuedMessages = messages - atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, uint64(broadcastStartPos)) + s.broadcasterQueuedMessagesPos.Store(uint64(broadcastStartPos)) s.broadcasterQueuedMessagesActiveReorg = feedReorg } else { - broadcasterQueuedMessagesPos := arbutil.MessageIndex(atomic.LoadUint64(&s.broadcasterQueuedMessagesPos)) + broadcasterQueuedMessagesPos := arbutil.MessageIndex(s.broadcasterQueuedMessagesPos.Load()) if broadcasterQueuedMessagesPos >= broadcastStartPos { // Feed messages older than cache s.broadcasterQueuedMessages = messages - atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, uint64(broadcastStartPos)) + s.broadcasterQueuedMessagesPos.Store(uint64(broadcastStartPos)) s.broadcasterQueuedMessagesActiveReorg = feedReorg } else if broadcasterQueuedMessagesPos+arbutil.MessageIndex(len(s.broadcasterQueuedMessages)) == broadcastStartPos { // Feed messages can be added directly to end of cache @@ -579,7 +579,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFe ) } s.broadcasterQueuedMessages = messages - atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, uint64(broadcastStartPos)) + s.broadcasterQueuedMessagesPos.Store(uint64(broadcastStartPos)) s.broadcasterQueuedMessagesActiveReorg = feedReorg } } @@ -795,7 +795,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil var cacheClearLen int messagesAfterPos := messageStartPos + arbutil.MessageIndex(len(messages)) - broadcastStartPos := arbutil.MessageIndex(atomic.LoadUint64(&s.broadcasterQueuedMessagesPos)) + broadcastStartPos := arbutil.MessageIndex(s.broadcasterQueuedMessagesPos.Load()) if messagesAreConfirmed { var duplicates int @@ -903,10 +903,10 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil // Check if new messages were added at the end of cache, if they were, then dont remove those particular messages if len(s.broadcasterQueuedMessages) > cacheClearLen { s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[cacheClearLen:] - atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, uint64(broadcastStartPos)+uint64(cacheClearLen)) + s.broadcasterQueuedMessagesPos.Store(uint64(broadcastStartPos) + uint64(cacheClearLen)) } else { s.broadcasterQueuedMessages = s.broadcasterQueuedMessages[:0] - atomic.StoreUint64(&s.broadcasterQueuedMessagesPos, 0) + s.broadcasterQueuedMessagesPos.Store(0) } s.broadcasterQueuedMessagesActiveReorg = false } diff --git a/arbos/programs/native_api.go b/arbos/programs/native_api.go index 136f74c964..6fbb630ef3 100644 --- a/arbos/programs/native_api.go +++ b/arbos/programs/native_api.go @@ -34,7 +34,7 @@ import ( ) var apiObjects sync.Map -var apiIds uintptr // atomic and sequential +var apiIds atomic.Uintptr // atomic and sequential type NativeApi struct { handler RequestHandler @@ -49,7 +49,7 @@ func newApi( memoryModel *MemoryModel, ) NativeApi { handler := newApiClosures(interpreter, tracingInfo, scope, memoryModel) - apiId := atomic.AddUintptr(&apiIds, 1) + apiId := apiIds.Add(1) id := usize(apiId) api := NativeApi{ handler: handler, diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 1167dba133..c111551b2e 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -134,7 +134,7 @@ type BroadcastClient struct { connMutex sync.Mutex conn net.Conn - retryCount int64 + retryCount atomic.Int64 retrying bool shuttingDown bool @@ -432,7 +432,7 @@ func (bc *BroadcastClient) 
startBackgroundReader(earlyFrameData io.Reader) { } func (bc *BroadcastClient) GetRetryCount() int64 { - return atomic.LoadInt64(&bc.retryCount) + return bc.retryCount.Load() } func (bc *BroadcastClient) isShuttingDown() bool { @@ -455,7 +455,7 @@ func (bc *BroadcastClient) retryConnect(ctx context.Context) io.Reader { case <-timer.C: } - atomic.AddInt64(&bc.retryCount, 1) + bc.retryCount.Add(1) earlyFrameData, err := bc.connect(ctx, bc.nextSeqNum) if err == nil { bc.retrying = false diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 23c4bd7738..8cd124bfe0 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -49,7 +49,7 @@ type BroadcastClients struct { secondaryRouter *Router // Use atomic access - connected int32 + connected atomic.Int32 } func NewBroadcastClients( @@ -113,7 +113,7 @@ func NewBroadcastClients( } func (bcs *BroadcastClients) adjustCount(delta int32) { - connected := atomic.AddInt32(&bcs.connected, delta) + connected := bcs.connected.Add(delta) if connected <= 0 { log.Error("no connected feed") } diff --git a/das/reader_aggregator_strategies.go b/das/reader_aggregator_strategies.go index d20760bd5b..8e10d52c16 100644 --- a/das/reader_aggregator_strategies.go +++ b/das/reader_aggregator_strategies.go @@ -41,7 +41,7 @@ func (s *abstractAggregatorStrategy) update(readers []daprovider.DASReader, stat // Exponentially growing Explore Exploit Strategy type simpleExploreExploitStrategy struct { - iterations uint32 + iterations atomic.Uint32 exploreIterations uint32 exploitIterations uint32 @@ -49,7 +49,7 @@ type simpleExploreExploitStrategy struct { } func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance { - iterations := atomic.AddUint32(&s.iterations, 1) + iterations := s.iterations.Add(1) readerSets := make([][]daprovider.DASReader, 0) s.RLock() diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 2bace9b677..bc93309c66 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -321,7 +321,7 @@ type Sequencer struct { onForwarderSet chan struct{} L1BlockAndTimeMutex sync.Mutex - l1BlockNumber uint64 + l1BlockNumber atomic.Uint64 l1Timestamp uint64 // activeMutex manages pauseChan (pauses execution) and forwarder @@ -356,7 +356,6 @@ func NewSequencer(execEngine *ExecutionEngine, l1Reader *headerreader.HeaderRead config: configFetcher, senderWhitelist: senderWhitelist, nonceCache: newNonceCache(config.NonceCacheSize), - l1BlockNumber: 0, l1Timestamp: 0, pauseChan: nil, onForwarderSet: make(chan struct{}, 1), @@ -900,7 +899,7 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { timestamp := time.Now().Unix() s.L1BlockAndTimeMutex.Lock() - l1Block := s.l1BlockNumber + l1Block := s.l1BlockNumber.Load() l1Timestamp := s.l1Timestamp s.L1BlockAndTimeMutex.Unlock() @@ -1014,9 +1013,9 @@ func (s *Sequencer) updateLatestParentChainBlock(header *types.Header) { defer s.L1BlockAndTimeMutex.Unlock() l1BlockNumber := arbutil.ParentHeaderToL1BlockNumber(header) - if header.Time > s.l1Timestamp || (header.Time == s.l1Timestamp && l1BlockNumber > s.l1BlockNumber) { + if header.Time > s.l1Timestamp || (header.Time == s.l1Timestamp && l1BlockNumber > s.l1BlockNumber.Load()) { s.l1Timestamp = header.Time - s.l1BlockNumber = l1BlockNumber + s.l1BlockNumber.Store(l1BlockNumber) } } @@ -1082,7 +1081,7 @@ func (s *Sequencer) Start(ctxIn context.Context) error { } if s.l1Reader != nil { - initialBlockNr := 
atomic.LoadUint64(&s.l1BlockNumber) + initialBlockNr := s.l1BlockNumber.Load() if initialBlockNr == 0 { return errors.New("sequencer not initialized") } diff --git a/staker/block_validator.go b/staker/block_validator.go index 0fea05469f..df9f01d76c 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -64,9 +64,9 @@ type BlockValidator struct { // can be read (atomic.Load) by anyone holding reorg-read // written (atomic.Set) by appropriate thread or (any way) holding reorg-write - createdA uint64 - recordSentA uint64 - validatedA uint64 + createdA atomic.Uint64 + recordSentA atomic.Uint64 + validatedA atomic.Uint64 validations containers.SyncMap[arbutil.MessageIndex, *validationStatus] config BlockValidatorConfigFetcher @@ -208,19 +208,19 @@ const ( ) type validationStatus struct { - Status uint32 // atomic: value is one of validationStatus* + Status atomic.Uint32 // atomic: value is one of validationStatus* Cancel func() // non-atomic: only read/written to with reorg mutex Entry *validationEntry // non-atomic: only read if Status >= validationStatusPrepared Runs []validator.ValidationRun // if status >= ValidationSent } func (s *validationStatus) getStatus() valStatusField { - uintStat := atomic.LoadUint32(&s.Status) + uintStat := s.Status.Load() return valStatusField(uintStat) } func (s *validationStatus) replaceStatus(old, new valStatusField) bool { - return atomic.CompareAndSwapUint32(&s.Status, uint32(old), uint32(new)) + return s.Status.CompareAndSwap(uint32(old), uint32(new)) } func NewBlockValidator( @@ -283,12 +283,12 @@ func NewBlockValidator( return ret, nil } -func atomicStorePos(addr *uint64, val arbutil.MessageIndex) { - atomic.StoreUint64(addr, uint64(val)) +func atomicStorePos(addr *atomic.Uint64, val arbutil.MessageIndex) { + addr.Store(uint64(val)) } -func atomicLoadPos(addr *uint64) arbutil.MessageIndex { - return arbutil.MessageIndex(atomic.LoadUint64(addr)) +func atomicLoadPos(addr *atomic.Uint64) arbutil.MessageIndex { + return arbutil.MessageIndex(addr.Load()) } func (v *BlockValidator) created() arbutil.MessageIndex { @@ -582,9 +582,9 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e return false, err } status := &validationStatus{ - Status: uint32(Created), - Entry: entry, + Entry: entry, } + status.Status.Store(uint32(Created)) v.validations.Store(pos, status) v.nextCreateStartGS = endGS v.nextCreatePrevDelayed = msg.DelayedMessagesRead @@ -962,13 +962,13 @@ func (v *BlockValidator) UpdateLatestStaked(count arbutil.MessageIndex, globalSt v.nextCreateStartGS = globalState v.nextCreatePrevDelayed = msg.DelayedMessagesRead v.nextCreateBatchReread = true - v.createdA = countUint64 + v.createdA.Store(countUint64) } // under the reorg mutex we don't need atomic access - if v.recordSentA < countUint64 { - v.recordSentA = countUint64 + if v.recordSentA.Load() < countUint64 { + v.recordSentA.Store(countUint64) } - v.validatedA = countUint64 + v.validatedA.Store(countUint64) v.valLoopPos = count validatorMsgCountValidatedGauge.Update(int64(countUint64)) err = v.writeLastValidated(globalState, nil) // we don't know which wasm roots were validated @@ -1027,13 +1027,13 @@ func (v *BlockValidator) Reorg(ctx context.Context, count arbutil.MessageIndex) v.nextCreatePrevDelayed = msg.DelayedMessagesRead v.nextCreateBatchReread = true countUint64 := uint64(count) - v.createdA = countUint64 + v.createdA.Store(countUint64) // under the reorg mutex we don't need atomic access - if v.recordSentA > countUint64 { - v.recordSentA = 
countUint64 + if v.recordSentA.Load() > countUint64 { + v.recordSentA.Store(countUint64) } - if v.validatedA > countUint64 { - v.validatedA = countUint64 + if v.validatedA.Load() > countUint64 { + v.validatedA.Store(countUint64) validatorMsgCountValidatedGauge.Update(int64(countUint64)) err := v.writeLastValidated(v.nextCreateStartGS, nil) // we don't know which wasm roots were validated if err != nil { diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go index f0e3dcffd7..72629e1978 100644 --- a/system_tests/seq_nonce_test.go +++ b/system_tests/seq_nonce_test.go @@ -67,7 +67,7 @@ func TestSequencerNonceTooHigh(t *testing.T) { cleanup := builder.Build(t) defer cleanup() - builder.L2Info.GetInfoWithPrivKey("Owner").Nonce++ + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce.Add(1) before := time.Now() tx := builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, common.Big0, nil) @@ -97,21 +97,21 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { defer cleanup() count := 15 - var completed uint64 + var completed atomic.Uint64 for i := 0; i < count; i++ { - builder.L2Info.GetInfoWithPrivKey("Owner").Nonce++ + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce.Add(1) tx := builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, common.Big0, nil) go func() { err := builder.L2.Client.SendTransaction(ctx, tx) if err == nil { Fatal(t, "No error when nonce was too high") } - atomic.AddUint64(&completed, 1) + completed.Add(1) }() } for wait := 9; wait >= 0; wait-- { - got := int(atomic.LoadUint64(&completed)) + got := int(completed.Load()) expected := count - builder.execConfig.Sequencer.NonceFailureCacheSize if got == expected { break diff --git a/system_tests/seq_reject_test.go b/system_tests/seq_reject_test.go index 76bdfc2612..2dbdba6804 100644 --- a/system_tests/seq_reject_test.go +++ b/system_tests/seq_reject_test.go @@ -54,7 +54,7 @@ func TestSequencerRejection(t *testing.T) { } wg := sync.WaitGroup{} - var stopBackground int32 + var stopBackground atomic.Int32 for user := 0; user < 9; user++ { user := user name := fmt.Sprintf("User%v", user) @@ -78,12 +78,12 @@ func TestSequencerRejection(t *testing.T) { GasFeeCap: arbmath.BigMulByUint(builderSeq.L2Info.GasPrice, 100), Value: common.Big0, } - for atomic.LoadInt32(&stopBackground) == 0 { - txData.Nonce = info.Nonce + for stopBackground.Load() == 0 { + txData.Nonce = info.Nonce.Load() var expectedErr string if user%3 == 0 { txData.Data = noopId - info.Nonce += 1 + info.Nonce.Add(1) } else if user%3 == 1 { txData.Data = revertId expectedErr = "execution reverted: SOLIDITY_REVERTING" @@ -116,7 +116,7 @@ func TestSequencerRejection(t *testing.T) { } } - atomic.StoreInt32(&stopBackground, 1) + stopBackground.Store(1) wg.Wait() header1, err := builderSeq.L2.Client.HeaderByNumber(ctx, nil) diff --git a/system_tests/test_info.go b/system_tests/test_info.go index 764a8ae396..fe4ff50175 100644 --- a/system_tests/test_info.go +++ b/system_tests/test_info.go @@ -29,7 +29,7 @@ var simulatedChainID = big.NewInt(1337) type AccountInfo struct { Address common.Address PrivateKey *ecdsa.PrivateKey - Nonce uint64 + Nonce atomic.Uint64 } type BlockchainTestInfo struct { @@ -177,7 +177,7 @@ func (b *BlockchainTestInfo) GetDefaultTransactOpts(name string, ctx context.Con if err != nil { return nil, err } - atomic.AddUint64(&info.Nonce, 1) // we don't set Nonce, but try to keep track.. + info.Nonce.Add(1) // we don't set Nonce, but try to keep track.. 
return tx.WithSignature(b.Signer, signature) }, GasMargin: 2000, // adjust by 20% @@ -215,7 +215,7 @@ func (b *BlockchainTestInfo) PrepareTxTo( ) *types.Transaction { b.T.Helper() info := b.GetInfoWithPrivKey(from) - txNonce := atomic.AddUint64(&info.Nonce, 1) - 1 + txNonce := info.Nonce.Add(1) - 1 if value == nil { value = common.Big0 } diff --git a/util/containers/promise.go b/util/containers/promise.go index a180c094eb..d336c4b964 100644 --- a/util/containers/promise.go +++ b/util/containers/promise.go @@ -20,7 +20,7 @@ type Promise[R any] struct { chanReady chan struct{} result R err error - produced uint32 + produced atomic.Uint32 cancel func() } @@ -67,7 +67,7 @@ func (p *Promise[R]) Cancel() { } func (p *Promise[R]) ProduceErrorSafe(err error) error { - if !atomic.CompareAndSwapUint32(&p.produced, 0, 1) { + if !p.produced.CompareAndSwap(0, 1) { return errors.New("cannot produce two values") } p.err = err @@ -83,7 +83,7 @@ func (p *Promise[R]) ProduceError(err error) { } func (p *Promise[R]) ProduceSafe(value R) error { - if !atomic.CompareAndSwapUint32(&p.produced, 0, 1) { + if !p.produced.CompareAndSwap(0, 1) { return errors.New("cannot produce two values") } p.result = value diff --git a/util/containers/promise_test.go b/util/containers/promise_test.go index 61ee4f2c3a..eeb9c9089c 100644 --- a/util/containers/promise_test.go +++ b/util/containers/promise_test.go @@ -25,8 +25,8 @@ func TestPromise(t *testing.T) { t.Fatal("unexpected Promise.Current when ready") } - cancelCalled := int64(0) - cancelFunc := func() { atomic.AddInt64(&cancelCalled, 1) } + var cancelCalled atomic.Int64{} + cancelFunc := func() { cancelCalled.Add(1) } tempPromise = NewPromise[int](cancelFunc) res, err = tempPromise.Current() @@ -68,12 +68,12 @@ func TestPromise(t *testing.T) { t.Fatal("unexpected Promise.Current 2nd time") } - if atomic.LoadInt64(&cancelCalled) != 0 { + if cancelCalled.Load() != 0 { t.Fatal("cancel called by await/current when it shouldn't be") } tempPromise.Cancel() - if atomic.LoadInt64(&cancelCalled) != 0 { + if cancelCalled.Load() != 0 { t.Fatal("cancel called after error produced") } @@ -84,11 +84,11 @@ func TestPromise(t *testing.T) { if res != 0 || !errors.Is(err, context.DeadlineExceeded) { t.Fatal("unexpected Promise.Await with timeout") } - if atomic.LoadInt64(&cancelCalled) != 1 { + if cancelCalled.Load() != 1 { t.Fatal("cancel not called by await on timeout") } tempPromise.Cancel() - if atomic.LoadInt64(&cancelCalled) != 2 { + if cancelCalled.Load() != 2 { t.Fatal("cancel not called by promise.Cancel") } } diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 56aebef396..e171e6e99b 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -77,7 +77,7 @@ type RpcClient struct { config ClientConfigFetcher client *rpc.Client autoStack *node.Node - logId uint64 + logId atomic.Uint64 } func NewRpcClient(config ClientConfigFetcher, stack *node.Node) *RpcClient { @@ -154,7 +154,7 @@ func (c *RpcClient) CallContext(ctx_in context.Context, result interface{}, meth if c.client == nil { return errors.New("not connected") } - logId := atomic.AddUint64(&c.logId, 1) + logId := c.logId.Add(1) log.Trace("sending RPC request", "method", method, "logId", logId, "args", limitedArgumentsMarshal{int(c.config().ArgLogLimit), args}) var err error for i := 0; i < int(c.config().Retries)+1; i++ { diff --git a/util/rpcclient/rpcclient_test.go b/util/rpcclient/rpcclient_test.go index 8613671d37..ae7e4fc226 100644 --- a/util/rpcclient/rpcclient_test.go +++ 
b/util/rpcclient/rpcclient_test.go @@ -46,10 +46,13 @@ func createTestNode(t *testing.T, ctx context.Context, stuckOrFailed int64) *nod stack, err := node.New(&stackConf) Require(t, err) + service := &testAPI{} + service.stuckCalls.Store(stuckOrFailed) + service.failedCalls.Store(stuckOrFailed) testAPIs := []rpc.API{{ Namespace: "test", Version: "1.0", - Service: &testAPI{stuckOrFailed, stuckOrFailed}, + Service: service, Public: true, Authenticated: false, }} @@ -67,12 +70,12 @@ func createTestNode(t *testing.T, ctx context.Context, stuckOrFailed int64) *nod } type testAPI struct { - stuckCalls int64 - failedCalls int64 + stuckCalls atomic.Int64 + failedCalls atomic.Int64 } func (t *testAPI) StuckAtFirst(ctx context.Context) error { - stuckRemaining := atomic.AddInt64(&t.stuckCalls, -1) + 1 + stuckRemaining := t.stuckCalls.Add(-1) + 1 if stuckRemaining <= 0 { return nil } @@ -81,7 +84,7 @@ func (t *testAPI) StuckAtFirst(ctx context.Context) error { } func (t *testAPI) FailAtFirst(ctx context.Context) error { - failedRemaining := atomic.AddInt64(&t.failedCalls, -1) + 1 + failedRemaining := t.failedCalls.Add(-1) + 1 if failedRemaining <= 0 { return nil } diff --git a/validator/client/redis/producer.go b/validator/client/redis/producer.go index 4aa4031350..7594421f9a 100644 --- a/validator/client/redis/producer.go +++ b/validator/client/redis/producer.go @@ -58,7 +58,7 @@ func ValidationClientConfigAddOptions(prefix string, f *pflag.FlagSet) { type ValidationClient struct { stopwaiter.StopWaiter name string - room int32 + room atomic.Int32 // producers stores moduleRoot to producer mapping. producers map[common.Hash]*pubsub.Producer[*validator.ValidationInput, validator.GoGlobalState] producerConfig pubsub.ProducerConfig @@ -75,14 +75,15 @@ func NewValidationClient(cfg *ValidationClientConfig) (*ValidationClient, error) if err != nil { return nil, err } - return &ValidationClient{ + validationClient := &ValidationClient{ name: cfg.Name, - room: cfg.Room, producers: make(map[common.Hash]*pubsub.Producer[*validator.ValidationInput, validator.GoGlobalState]), producerConfig: cfg.ProducerConfig, redisClient: redisClient, createStreams: cfg.CreateStreams, - }, nil + } + validationClient.room.Store(cfg.Room) + return validationClient, nil } func (c *ValidationClient) Initialize(ctx context.Context, moduleRoots []common.Hash) error { @@ -114,8 +115,8 @@ func (c *ValidationClient) WasmModuleRoots() ([]common.Hash, error) { } func (c *ValidationClient) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { - atomic.AddInt32(&c.room, -1) - defer atomic.AddInt32(&c.room, 1) + c.room.Add(-1) + defer c.room.Add(1) producer, found := c.producers[moduleRoot] if !found { errPromise := containers.NewReadyPromise(validator.GoGlobalState{}, fmt.Errorf("no validation is configured for wasm root %v", moduleRoot)) @@ -152,5 +153,5 @@ func (c *ValidationClient) Name() string { } func (c *ValidationClient) Room() int { - return int(c.room) + return int(c.room.Load()) } diff --git a/validator/client/validation_client.go b/validator/client/validation_client.go index fa6b9000f2..c8a071cc14 100644 --- a/validator/client/validation_client.go +++ b/validator/client/validation_client.go @@ -29,7 +29,7 @@ type ValidationClient struct { stopwaiter.StopWaiter client *rpcclient.RpcClient name string - room int32 + room atomic.Int32 wasmModuleRoots []common.Hash } @@ -40,12 +40,12 @@ func NewValidationClient(config rpcclient.ClientConfigFetcher, stack *node.Node) } func (c *ValidationClient) 
Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { - atomic.AddInt32(&c.room, -1) + c.room.Add(-1) promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](c, func(ctx context.Context) (validator.GoGlobalState, error) { input := server_api.ValidationInputToJson(entry) var res validator.GoGlobalState err := c.client.CallContext(ctx, &res, server_api.Namespace+"_validate", input, moduleRoot) - atomic.AddInt32(&c.room, 1) + c.room.Add(1) return res, err }) return server_common.NewValRun(promise, moduleRoot) @@ -81,7 +81,7 @@ func (c *ValidationClient) Start(ctx_in context.Context) error { } else { log.Info("connected to validation server", "name", name, "room", room) } - atomic.StoreInt32(&c.room, int32(room)) + c.room.Store(int32(room)) c.wasmModuleRoots = moduleRoots c.name = name return nil @@ -106,7 +106,7 @@ func (c *ValidationClient) Name() string { } func (c *ValidationClient) Room() int { - room32 := atomic.LoadInt32(&c.room) + room32 := c.room.Load() if room32 < 0 { return 0 } diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go index cffd3db0ee..adca9695e2 100644 --- a/validator/server_arb/machine.go +++ b/validator/server_arb/machine.go @@ -59,7 +59,7 @@ type ArbitratorMachine struct { var _ MachineInterface = (*ArbitratorMachine)(nil) var preimageResolvers containers.SyncMap[int64, GoPreimageResolver] -var lastPreimageResolverId int64 // atomic +var lastPreimageResolverId atomic.Int64 // atomic // Any future calls to this machine will result in a panic func (m *ArbitratorMachine) Destroy() { @@ -382,7 +382,7 @@ func (m *ArbitratorMachine) SetPreimageResolver(resolver GoPreimageResolver) err if m.frozen { return errors.New("machine frozen") } - id := atomic.AddInt64(&lastPreimageResolverId, 1) + id := lastPreimageResolverId.Add(1) preimageResolvers.Store(id, resolver) m.contextId = &id runtime.SetFinalizer(m.contextId, freeContextId) diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index dca15c369e..7b9293f7bd 100644 --- a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -59,7 +59,7 @@ func DefaultArbitratorSpawnerConfigFetcher() *ArbitratorSpawnerConfig { type ArbitratorSpawner struct { stopwaiter.StopWaiter - count int32 + count atomic.Int32 locator *server_common.MachineLocator machineLoader *ArbMachineLoader config ArbitratorSpawnerConfigFecher @@ -176,9 +176,9 @@ func (v *ArbitratorSpawner) execute( } func (v *ArbitratorSpawner) Launch(entry *validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { - atomic.AddInt32(&v.count, 1) + v.count.Add(1) promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](v, func(ctx context.Context) (validator.GoGlobalState, error) { - defer atomic.AddInt32(&v.count, -1) + defer v.count.Add(-1) return v.execute(ctx, entry, moduleRoot) }) return server_common.NewValRun(promise, moduleRoot) diff --git a/validator/server_jit/spawner.go b/validator/server_jit/spawner.go index 703e761af5..eda74b2911 100644 --- a/validator/server_jit/spawner.go +++ b/validator/server_jit/spawner.go @@ -39,7 +39,7 @@ func JitSpawnerConfigAddOptions(prefix string, f *flag.FlagSet) { type JitSpawner struct { stopwaiter.StopWaiter - count int32 + count atomic.Int32 locator *server_common.MachineLocator machineLoader *JitMachineLoader config JitSpawnerConfigFecher @@ -91,9 +91,9 @@ func (s *JitSpawner) Name() string { } func (v *JitSpawner) Launch(entry 
*validator.ValidationInput, moduleRoot common.Hash) validator.ValidationRun { - atomic.AddInt32(&v.count, 1) + v.count.Add(1) promise := stopwaiter.LaunchPromiseThread[validator.GoGlobalState](v, func(ctx context.Context) (validator.GoGlobalState, error) { - defer atomic.AddInt32(&v.count, -1) + defer v.count.Add(-1) return v.execute(ctx, entry, moduleRoot) }) return server_common.NewValRun(promise, moduleRoot) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index ba70756c98..7fb88bf9eb 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -51,7 +51,7 @@ type ClientConnection struct { requestedSeqNum arbutil.MessageIndex LastSentSeqNum atomic.Uint64 - lastHeardUnix int64 + lastHeardUnix atomic.Int64 out chan message backlog backlog.Backlog registered chan bool @@ -74,7 +74,7 @@ func NewClientConnection( delay time.Duration, bklg backlog.Backlog, ) *ClientConnection { - return &ClientConnection{ + clientConnection := &ClientConnection{ conn: conn, clientIp: connectingIP, desc: desc, @@ -82,7 +82,6 @@ func NewClientConnection( Name: fmt.Sprintf("%s@%s-%d", connectingIP, conn.RemoteAddr(), rand.Intn(10)), clientAction: clientAction, requestedSeqNum: requestedSeqNum, - lastHeardUnix: time.Now().Unix(), out: make(chan message, maxSendQueue), compression: compression, flateReader: NewFlateReader(), @@ -91,6 +90,8 @@ func NewClientConnection( registered: make(chan bool, 1), backlogSent: false, } + clientConnection.lastHeardUnix.Store(time.Now().Unix()) + return clientConnection } func (cc *ClientConnection) Age() time.Duration { @@ -287,7 +288,7 @@ func (cc *ClientConnection) RequestedSeqNum() arbutil.MessageIndex { } func (cc *ClientConnection) GetLastHeard() time.Time { - return time.Unix(atomic.LoadInt64(&cc.lastHeardUnix), 0) + return time.Unix(cc.lastHeardUnix.Load(), 0) } // Receive reads next message from client's underlying connection. 
@@ -307,7 +308,7 @@ func (cc *ClientConnection) readRequest(ctx context.Context, timeout time.Durati cc.ioMutex.Lock() defer cc.ioMutex.Unlock() - atomic.StoreInt64(&cc.lastHeardUnix, time.Now().Unix()) + cc.lastHeardUnix.Store(time.Now().Unix()) var data []byte var opCode ws.OpCode diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index a88716756a..4b7b7a2bcb 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -44,7 +44,7 @@ type ClientManager struct { stopwaiter.StopWaiter clientPtrMap map[*ClientConnection]bool - clientCount int32 + clientCount atomic.Int32 pool *gopool.Pool poller netpoll.Poller broadcastChan chan *m.BroadcastMessage @@ -85,7 +85,7 @@ func (cm *ClientManager) registerClient(ctx context.Context, clientConnection *C clientsCurrentGauge.Inc(1) clientsConnectCount.Inc(1) - atomic.AddInt32(&cm.clientCount, 1) + cm.clientCount.Add(1) cm.clientPtrMap[clientConnection] = true clientsTotalSuccessCounter.Inc(1) @@ -120,7 +120,7 @@ func (cm *ClientManager) removeClientImpl(clientConnection *ClientConnection) { clientsDurationHistogram.Update(clientConnection.Age().Microseconds()) clientsCurrentGauge.Dec(1) clientsDisconnectCount.Inc(1) - atomic.AddInt32(&cm.clientCount, -1) + cm.clientCount.Add(-1) } func (cm *ClientManager) removeClient(clientConnection *ClientConnection) { @@ -137,7 +137,7 @@ func (cm *ClientManager) removeClient(clientConnection *ClientConnection) { } func (cm *ClientManager) ClientCount() int32 { - return atomic.LoadInt32(&cm.clientCount) + return cm.clientCount.Load() } // Broadcast sends batch item to all clients. From ebd287e06b98cf2b1944922e10ed737a1afc7565 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 1 Jul 2024 18:22:21 +0530 Subject: [PATCH 15/69] fix tests and lint --- arbnode/simple_redis_lock_test.go | 8 ++++---- system_tests/common_test.go | 1 - system_tests/conditionaltx_test.go | 4 ++-- system_tests/seqfeed_test.go | 2 +- system_tests/staker_test.go | 2 +- system_tests/test_info.go | 8 +++++--- system_tests/twonodeslong_test.go | 2 +- util/containers/promise_test.go | 2 +- 8 files changed, 15 insertions(+), 14 deletions(-) diff --git a/arbnode/simple_redis_lock_test.go b/arbnode/simple_redis_lock_test.go index 25805de0de..72f3dfa837 100644 --- a/arbnode/simple_redis_lock_test.go +++ b/arbnode/simple_redis_lock_test.go @@ -83,10 +83,10 @@ func simpleRedisLockTest(t *testing.T, redisKeySuffix string, chosen int, backgo } wg.Wait() successful := -1 - for i, counter := range counters { - if counter.Load() != 0 { - if counter.Load() != test_attempts { - t.Fatalf("counter %d value %d", i, counter) + for i := range counters { + if counters[i].Load() != 0 { + if counters[i].Load() != test_attempts { + t.Fatalf("counter %d value %d", i, counters[i].Load()) } if successful > 0 { t.Fatalf("counter %d and %d both positive", i, successful) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 1c45802b54..f380386ac9 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -317,7 +317,6 @@ func BridgeBalance( l2info.SetFullAccountInfo(account, &AccountInfo{ Address: l1acct.Address, PrivateKey: l1acct.PrivateKey, - Nonce: 0, }) } else { l2acct := l2info.GetInfoWithPrivKey(account) diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go index 4f800d9769..286060e666 100644 --- a/system_tests/conditionaltx_test.go +++ b/system_tests/conditionaltx_test.go @@ -58,7 +58,7 @@ func 
testConditionalTxThatShouldSucceed(t *testing.T, ctx context.Context, idx i func testConditionalTxThatShouldFail(t *testing.T, ctx context.Context, idx int, l2info info, rpcClient *rpc.Client, options *arbitrum_types.ConditionalOptions, expectedErrorCode int) { t.Helper() accountInfo := l2info.GetInfoWithPrivKey("Owner") - nonce := accountInfo.Nonce + nonce := accountInfo.Nonce.Load() tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, big.NewInt(1e12), nil) err := arbitrum.SendConditionalTransactionRPC(ctx, rpcClient, tx, options) if err == nil { @@ -77,7 +77,7 @@ func testConditionalTxThatShouldFail(t *testing.T, ctx context.Context, idx int, Fatal(t, "unexpected error type, err:", err) } } - accountInfo.Nonce = nonce // revert nonce as the tx failed + accountInfo.Nonce.Store(nonce) // revert nonce as the tx failed } func getEmptyOptions(address common.Address) []*arbitrum_types.ConditionalOptions { diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index ab30598b60..e3a98b4961 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -192,7 +192,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { builder.L2Info.GenerateAccount("RealUser") fraudTx := builder.L2Info.PrepareTx("Owner", "FraudUser", builder.L2Info.TransferGas, big.NewInt(1e12), nil) - builder.L2Info.GetInfoWithPrivKey("Owner").Nonce -= 1 // Use same l2info object for different l2s + builder.L2Info.GetInfoWithPrivKey("Owner").Nonce.Add(^uint64(0)) // Use same l2info object for different l2s realTx := builder.L2Info.PrepareTx("Owner", "RealUser", builder.L2Info.TransferGas, big.NewInt(1e12), nil) for i := 0; i < 10; i++ { diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 4afe2e8ccd..52f16614f7 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -42,7 +42,7 @@ import ( func makeBackgroundTxs(ctx context.Context, builder *NodeBuilder) error { for i := uint64(0); ctx.Err() == nil; i++ { - builder.L2Info.Accounts["BackgroundUser"].Nonce = i + builder.L2Info.Accounts["BackgroundUser"].Nonce.Store(i) tx := builder.L2Info.PrepareTx("BackgroundUser", "BackgroundUser", builder.L2Info.TransferGas, common.Big0, nil) err := builder.L2.Client.SendTransaction(ctx, tx) if err != nil { diff --git a/system_tests/test_info.go b/system_tests/test_info.go index fe4ff50175..a4fbb44c44 100644 --- a/system_tests/test_info.go +++ b/system_tests/test_info.go @@ -93,7 +93,6 @@ func (b *BlockchainTestInfo) GenerateAccount(name string) { b.Accounts[name] = &AccountInfo{ PrivateKey: privateKey, Address: crypto.PubkeyToAddress(privateKey.PublicKey), - Nonce: 0, } log.Info("New Key ", "name", name, "Address", b.Accounts[name].Address) } @@ -139,8 +138,11 @@ func (b *BlockchainTestInfo) SetContract(name string, address common.Address) { } func (b *BlockchainTestInfo) SetFullAccountInfo(name string, info *AccountInfo) { - infoCopy := *info - b.Accounts[name] = &infoCopy + b.Accounts[name] = &AccountInfo{ + Address: info.Address, + PrivateKey: info.PrivateKey, + } + b.Accounts[name].Nonce.Store(info.Nonce.Load()) } func (b *BlockchainTestInfo) GetAddress(name string) common.Address { diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index ce3244462f..83cd975dd8 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -120,7 +120,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { } } // create bad tx on delayed inbox - 
builder.L2Info.GetInfoWithPrivKey("ErrorTxSender").Nonce = 10 + builder.L2Info.GetInfoWithPrivKey("ErrorTxSender").Nonce.Store(10) builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ WrapL2ForDelayed(t, builder.L2Info.PrepareTx("ErrorTxSender", "DelayedReceiver", 30002, delayedFaucetNeeds, nil), builder.L1Info, "User", 100000), }) diff --git a/util/containers/promise_test.go b/util/containers/promise_test.go index eeb9c9089c..3e41a6465d 100644 --- a/util/containers/promise_test.go +++ b/util/containers/promise_test.go @@ -25,7 +25,7 @@ func TestPromise(t *testing.T) { t.Fatal("unexpected Promise.Current when ready") } - var cancelCalled atomic.Int64{} + var cancelCalled atomic.Int64 cancelFunc := func() { cancelCalled.Add(1) } tempPromise = NewPromise[int](cancelFunc) From baf74fcebf923ed098028619ee4bdc868fbacd84 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Mon, 1 Jul 2024 18:31:00 +0530 Subject: [PATCH 16/69] swap with atomic.bool --- util/containers/promise.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/containers/promise.go b/util/containers/promise.go index d336c4b964..54f0df195d 100644 --- a/util/containers/promise.go +++ b/util/containers/promise.go @@ -20,7 +20,7 @@ type Promise[R any] struct { chanReady chan struct{} result R err error - produced atomic.Uint32 + produced atomic.Bool cancel func() } @@ -67,7 +67,7 @@ func (p *Promise[R]) Cancel() { } func (p *Promise[R]) ProduceErrorSafe(err error) error { - if !p.produced.CompareAndSwap(0, 1) { + if !p.produced.CompareAndSwap(false, true) { return errors.New("cannot produce two values") } p.err = err @@ -83,7 +83,7 @@ func (p *Promise[R]) ProduceError(err error) { } func (p *Promise[R]) ProduceSafe(value R) error { - if !p.produced.CompareAndSwap(0, 1) { + if !p.produced.CompareAndSwap(false, true) { return errors.New("cannot produce two values") } p.result = value From 947af3099059ba58a13c66797fcdfa34e8c419b2 Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Fri, 28 Jun 2024 18:45:03 +0800 Subject: [PATCH 17/69] refactor: breakdown query to eth_getLogs --- staker/l1_validator.go | 2 +- staker/rollup_watcher.go | 24 ++++++++++++++++++------ staker/staker.go | 4 ++++ 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index d68365ede0..14c46b5ddb 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -381,7 +381,7 @@ func (v *L1Validator) generateNodeAction( return nil, false, nil } - successorNodes, err := v.rollup.LookupNodeChildren(ctx, stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash) + successorNodes, err := v.rollup.LookupNodeChildren(ctx, stakerInfo.LatestStakedNode, stakerConfig.LogQueryRange, stakerInfo.LatestStakedNodeHash) if err != nil { return nil, false, fmt.Errorf("error looking up node %v (hash %v) children: %w", stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash, err) } diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 118ce15b44..1fbc783b47 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/core/types" ) var rollupInitializedID common.Hash @@ -165,7 +166,7 @@ func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInf }, nil } -func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, nodeHash common.Hash) ([]*NodeInfo, error) { +func (r *RollupWatcher) 
LookupNodeChildren(ctx context.Context, nodeNum uint64, logQueryRange uint64, nodeHash common.Hash) ([]*NodeInfo, error) { node, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), nodeNum) if err != nil { return nil, err } @@ -180,17 +181,28 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, Addresses: []common.Address{r.address}, Topics: [][]common.Hash{{nodeCreatedID}, nil, {nodeHash}}, } - query.FromBlock, err = r.getNodeCreationBlock(ctx, nodeNum) + fromBlock, err := r.getNodeCreationBlock(ctx, nodeNum) if err != nil { return nil, err } - query.ToBlock, err = r.getNodeCreationBlock(ctx, node.LatestChildNumber) + toBlock, err := r.getNodeCreationBlock(ctx, node.LatestChildNumber) if err != nil { return nil, err } - logs, err := r.client.FilterLogs(ctx, query) - if err != nil { - return nil, err + var logs []types.Log + // break down the query to avoid eth_getLogs query limit + for toBlock.Cmp(fromBlock) > 0 { + query.FromBlock = fromBlock + query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(logQueryRange)) + if query.ToBlock.Cmp(toBlock) > 0 { + query.ToBlock = toBlock + } + segment, err := r.client.FilterLogs(ctx, query) + if err != nil { + return nil, err + } + logs = append(logs, segment) + fromBlock = new(big.Int).Add(query.ToBlock, big.NewInt(1)) } infos := make([]*NodeInfo, 0, len(logs)) lastHash := nodeHash diff --git a/staker/staker.go b/staker/staker.go index da6413e122..f7f412ca16 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -90,6 +90,7 @@ type L1ValidatorConfig struct { ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` Dangerous DangerousConfig `koanf:"dangerous"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + LogQueryRange uint64 `koanf:"log-query-range" reload:"hot"` strategy StakerStrategy gasRefunder common.Address @@ -156,6 +157,7 @@ var DefaultL1ValidatorConfig = L1ValidatorConfig{ ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, + LogQueryRange: 999, } var TestL1ValidatorConfig = L1ValidatorConfig{ @@ -176,6 +178,7 @@ var TestL1ValidatorConfig = L1ValidatorConfig{ ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, + LogQueryRange: 999, } var DefaultValidatorL1WalletConfig = genericconf.WalletConfig{ @@ -201,6 +204,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") + f.Uint64(prefix+".log-query-range", DefaultL1ValidatorConfig.LogQueryRange, "range to query from eth_getLogs") dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) From ea08ba547fc4ad87b6cbef3ebc44712b481a98d0 Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Fri, 28 Jun 2024 18:51:20 +0800 Subject: [PATCH 18/69] fix: array append --- staker/rollup_watcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 1fbc783b47..6d871be24e 100644 ---
a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -201,7 +201,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, if err != nil { return nil, err } - logs = append(logs, segment) + logs = append(logs, segment...) fromBlock = new(big.Int).Add(query.ToBlock, big.NewInt(1)) } infos := make([]*NodeInfo, 0, len(logs)) From 40ff63075e231db86a7dde9d3e9b3876bf98495b Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Fri, 28 Jun 2024 18:55:25 +0800 Subject: [PATCH 19/69] fix: log query range type error --- staker/rollup_watcher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 6d871be24e..4ff8db0ce3 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -193,7 +193,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, // break down the query to avoid eth_getLogs query limit for toBlock.Cmp(fromBlock) > 0 { query.FromBlock = fromBlock - query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(logQueryRange)) + query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRange))) if query.ToBlock.Cmp(toBlock) > 0 { query.ToBlock = toBlock } From b017e98eb9936fb0f3e12f24250bfaf2f7bc6ab9 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 3 Jul 2024 11:46:04 -0700 Subject: [PATCH 20/69] Add submodule pin check to CI This checks that submodules are ancestors of origin/HEAD so that non-merged branches aren't accidentally checked in. --- .github/workflows/submodule-pin-check.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 .github/workflows/submodule-pin-check.yml diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml new file mode 100644 index 0000000000..43984f1158 --- /dev/null +++ b/.github/workflows/submodule-pin-check.yml @@ -0,0 +1,15 @@ +name: Merge Checks + +on: + pull_request: + branches: [ master ] + types: [synchronize, opened, reopened, labeled, unlabeled] + +jobs: + submodule-pin-check: + name: Submodule Pin Check + runs-on: ubuntu-latest + steps: + - name: Check all submodules are under origin/HEAD + run: git submodule foreach git merge-base --is-ancestor HEAD origin/HEAD + From b64dc4fcc611592653d7b3f7b032afb0bb780ebf Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 3 Jul 2024 12:02:50 -0700 Subject: [PATCH 21/69] Add checkout step --- .github/workflows/submodule-pin-check.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 43984f1158..050eea4b25 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -10,6 +10,9 @@ jobs: name: Submodule Pin Check runs-on: ubuntu-latest steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Check all submodules are under origin/HEAD run: git submodule foreach git merge-base --is-ancestor HEAD origin/HEAD From 9e45391518493f4bfebd0e52e207572b0db9fb8d Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 3 Jul 2024 12:05:41 -0700 Subject: [PATCH 22/69] Check out submodules --- .github/workflows/submodule-pin-check.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 050eea4b25..86a69c94cd 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -12,6 +12,8 @@ jobs: steps: - name: Checkout uses: 
actions/checkout@v4 + with: + submodules: recursive - name: Check all submodules are under origin/HEAD run: git submodule foreach git merge-base --is-ancestor HEAD origin/HEAD From 30870b5a05af710cf56d03208ecd17bb626ae2db Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 3 Jul 2024 12:12:16 -0700 Subject: [PATCH 23/69] Update arbitrator/langs/bf pin --- arbitrator/langs/bf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbitrator/langs/bf b/arbitrator/langs/bf index 062b87bad1..cb5750580f 160000 --- a/arbitrator/langs/bf +++ b/arbitrator/langs/bf @@ -1 +1 @@ -Subproject commit 062b87bad1ec00d42b9cc2b5ee41e63cd6ff1cbb +Subproject commit cb5750580f6990b5166ffce83de11b766a25ca31 From bbd649e664d4a1c47b0b09986015c6a548ddb12c Mon Sep 17 00:00:00 2001 From: Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com> Date: Wed, 3 Jul 2024 15:55:22 -0700 Subject: [PATCH 24/69] Update .github/workflows/submodule-pin-check.yml Co-authored-by: Gabriel de Quadros Ligneul <8294320+gligneul@users.noreply.github.com> --- .github/workflows/submodule-pin-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 86a69c94cd..ac0b8fba89 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -1,4 +1,4 @@ -name: Merge Checks +name: Submodule Pin Check on: pull_request: From c3999026cff51e75226072a300467cdc7f07126a Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 8 Jul 2024 04:44:35 -0700 Subject: [PATCH 25/69] Remove check for labeled/unlabeled --- .github/workflows/submodule-pin-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index ac0b8fba89..b37cc7b9d5 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -3,7 +3,7 @@ name: Submodule Pin Check on: pull_request: branches: [ master ] - types: [synchronize, opened, reopened, labeled, unlabeled] + types: [synchronize, opened, reopened] jobs: submodule-pin-check: From a1257f0fd4dd0019e88d6bea15be1e9fc9407d82 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 8 Jul 2024 09:05:34 -0500 Subject: [PATCH 26/69] Address Feedback on BOLD Cache PR --- staker/challenge-cache/cache.go | 77 ++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 35 deletions(-) diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index 8cca4bb835..5ee775d3b9 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -10,7 +10,7 @@ store them in a filesystem cache to avoid recomputing them and for hierarchical Each file contains a list of 32 byte hashes, concatenated together as bytes. Using this structure, we can namespace hashes by message number and by challenge level. -Once a validator receives a full list of computed machine hashes for the first time from a validatio node, +Once a validator receives a full list of computed machine hashes for the first time from a validation node, it will write the hashes to this filesystem hierarchy for fast access next time these hashes are needed. Example uses: @@ -18,7 +18,7 @@ Example uses: - Obtain all the hashes from step 100 to 101 at subchallenge level 1 for the execution of message num 70. 
wavm-module-root-0xab/ - rollup-block-hash-0x12...-message-num-70/ + message-num-70-rollup-block-hash-0x12.../ hashes.bin subchallenge-level-1-big-step-100/ hashes.bin @@ -31,6 +31,7 @@ package challengecache import ( "bufio" + "context" "errors" "fmt" "io" @@ -42,7 +43,7 @@ import ( ) var ( - ErrNotFoundInCache = errors.New("no found in challenge cache") + ErrNotFoundInCache = errors.New("not found in challenge cache") ErrFileAlreadyExists = errors.New("file already exists") ErrNoHashes = errors.New("no hashes being written") hashesFileName = "hashes.bin" @@ -59,30 +60,46 @@ type HistoryCommitmentCacher interface { Put(lookup *Key, hashes []common.Hash) error } +// Key for cache lookups includes the wavm module root of a challenge, as well +// as the heights for messages and big steps as needed. +type Key struct { + RollupBlockHash common.Hash + WavmModuleRoot common.Hash + MessageHeight uint64 + StepHeights []uint64 +} + // Cache for history commitments on disk. type Cache struct { - baseDir string + baseDir string + tempWritesDir string } // New cache from a base directory path. func New(baseDir string) (*Cache, error) { - if _, err := os.Stat(baseDir); err != nil { - if err := os.MkdirAll(baseDir, os.ModePerm); err != nil { - return nil, fmt.Errorf("could not make base cache directory %s: %w", baseDir, err) - } - } return &Cache{ - baseDir: baseDir, + baseDir: baseDir, + tempWritesDir: "", }, nil } -// Key for cache lookups includes the wavm module root of a challenge, as well -// as the heights for messages and big steps as needed. -type Key struct { - RollupBlockHash common.Hash - WavmModuleRoot common.Hash - MessageHeight uint64 - StepHeights []uint64 +// Init a cache by verifying its base directory exists. +func (c *Cache) Init(_ context.Context) error { + if _, err := os.Stat(c.baseDir); err != nil { + if err := os.MkdirAll(c.baseDir, os.ModePerm); err != nil { + return fmt.Errorf("could not make initialize challenge cache directory %s: %w", c.baseDir, err) + } + } + // We create a temp directory to write our hashes to first when putting to the cache. + // Once writing succeeds, we rename in an atomic operation to the correct file name + // in the cache directory hierarchy in the `Put` function. All of these temporary writes + // will occur in a subdir of the base directory called temp. + tempWritesDir, err := os.MkdirTemp(c.baseDir, "temp") + if err != nil { + return err + } + c.tempWritesDir = tempWritesDir + return nil } // Get a list of hashes from the cache from index 0 up to a certain index. Hashes are saved as files in the directory @@ -122,24 +139,14 @@ func (c *Cache) Put(lookup *Key, hashes []common.Hash) error { if len(hashes) == 0 { return ErrNoHashes } - fName, err := determineFilePath(c.baseDir, lookup) - if err != nil { - return err + if c.tempWritesDir == "" { + return fmt.Errorf("cache not initialized by calling .Init(ctx)") } - // We create a tmp file to write our hashes to first. If writing fails, - // we don't want to leave a half-written file in our cache directory. - // Once writing succeeds, we rename in an atomic operation to the correct file name - // in the cache directory hierarchy. 
- tmp, err := os.MkdirTemp(c.baseDir, "tmpdir") + fName, err := determineFilePath(c.baseDir, lookup) if err != nil { return err } - tmpFName := filepath.Join(tmp, fName) - dir := filepath.Dir(tmpFName) - if err := os.MkdirAll(dir, os.ModePerm); err != nil { - return fmt.Errorf("could not make tmp directory %s: %w", dir, err) - } - f, err := os.Create(tmpFName) + f, err := os.CreateTemp(c.tempWritesDir, fmt.Sprintf("%s-*", hashesFileName)) if err != nil { return err } @@ -154,11 +161,11 @@ func (c *Cache) Put(lookup *Key, hashes []common.Hash) error { if err := os.MkdirAll(filepath.Dir(fName), os.ModePerm); err != nil { return fmt.Errorf("could not make file directory %s: %w", fName, err) } - // If the file writing was successful, we rename the file from the tmp directory + // If the file writing was successful, we rename the file from the temp directory // into our cache directory. This is an atomic operation. // For more information on this atomic write pattern, see: // https://stackoverflow.com/questions/2333872/how-to-make-file-creation-an-atomic-operation - return os.Rename(tmpFName /*old */, fName /* new */) + return os.Rename(f.Name() /*old */, fName /* new */) } // Reads 32 bytes at a time from a reader up to a specified height. If none, then read all. @@ -217,7 +224,7 @@ for the data requested within the cache directory hierarchy. The folder structur for a given filesystem challenge cache will look as follows: wavm-module-root-0xab/ - rollup-block-hash-0x12...-message-num-70/ + message-num-70-rollup-block-hash-0x12.../ hashes.bin subchallenge-level-1-big-step-100/ hashes.bin @@ -225,7 +232,7 @@ for a given filesystem challenge cache will look as follows: func determineFilePath(baseDir string, lookup *Key) (string, error) { key := make([]string, 0) key = append(key, fmt.Sprintf("%s-%s", wavmModuleRootPrefix, lookup.WavmModuleRoot.Hex())) - key = append(key, fmt.Sprintf("%s-%s-%s-%d", rollupBlockHashPrefix, lookup.RollupBlockHash.Hex(), messageNumberPrefix, lookup.MessageHeight)) + key = append(key, fmt.Sprintf("%s-%d-%s-%s", messageNumberPrefix, lookup.MessageHeight, rollupBlockHashPrefix, lookup.RollupBlockHash.Hex())) for challengeLevel, height := range lookup.StepHeights { key = append(key, fmt.Sprintf( "%s-%d-%s-%d", From 90bd2930d867af18848dada5053296008709b123 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 8 Jul 2024 09:07:23 -0500 Subject: [PATCH 27/69] tests passing --- staker/challenge-cache/cache_test.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go index 6b15d62af7..765b758571 100644 --- a/staker/challenge-cache/cache_test.go +++ b/staker/challenge-cache/cache_test.go @@ -4,6 +4,7 @@ package challengecache import ( "bytes" + "context" "errors" "fmt" "io" @@ -17,6 +18,7 @@ import ( var _ HistoryCommitmentCacher = (*Cache)(nil) func TestCache(t *testing.T) { + ctx := context.Background() basePath := t.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { t.Fatal(err) @@ -30,6 +32,9 @@ func TestCache(t *testing.T) { if err != nil { t.Fatal(err) } + if err = cache.Init(ctx); err != nil { + t.Fatal(err) + } key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 0, @@ -255,7 +260,7 @@ func Test_determineFilePath(t *testing.T) { StepHeights: []uint64{50}, }, }, - want: 
"wavm-module-root-0x0000000000000000000000000000000000000000000000000000000000000000/rollup-block-hash-0x0000000000000000000000000000000000000000000000000000000000000000-message-num-100/subchallenge-level-1-big-step-50/hashes.bin", + want: "wavm-module-root-0x0000000000000000000000000000000000000000000000000000000000000000/message-num-100-rollup-block-hash-0x0000000000000000000000000000000000000000000000000000000000000000/subchallenge-level-1-big-step-50/hashes.bin", wantErr: false, }, } @@ -282,6 +287,7 @@ func Test_determineFilePath(t *testing.T) { } func BenchmarkCache_Read_32Mb(b *testing.B) { + ctx := context.Background() b.StopTimer() basePath := os.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { @@ -296,6 +302,9 @@ func BenchmarkCache_Read_32Mb(b *testing.B) { if err != nil { b.Fatal(err) } + if err = cache.Init(ctx); err != nil { + b.Fatal(err) + } key := &Key{ WavmModuleRoot: common.BytesToHash([]byte("foo")), MessageHeight: 0, From ee661427a390fb8558c5160c6c2b416479c2512d Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 8 Jul 2024 09:41:11 -0500 Subject: [PATCH 28/69] pruning done --- staker/challenge-cache/cache.go | 53 ++++++++++ staker/challenge-cache/cache_test.go | 142 +++++++++++++++++++++++++++ 2 files changed, 195 insertions(+) diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index 5ee775d3b9..f2f53738f7 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -22,6 +22,19 @@ Example uses: hashes.bin subchallenge-level-1-big-step-100/ hashes.bin + wavm-module-root-0xcd/ + message-num-53-rollup-block-hash-0x11.../ + hashes.bin + subchallenge-level-1-big-step-100/ + hashes.bin + message-num-69-rollup-block-hash-0x11.../ + hashes.bin + subchallenge-level-1-big-step-100/ + hashes.bin + message-num-70-rollup-block-hash-0x11.../ + hashes.bin + subchallenge-level-1-big-step-100/ + hashes.bin We namespace top-level block challenges by wavm module root. Then, we can retrieve the hashes for any data within a challenge or associated subchallenge based on the hierarchy above. @@ -37,6 +50,8 @@ import ( "io" "os" "path/filepath" + "regexp" + "strconv" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -168,6 +183,44 @@ func (c *Cache) Put(lookup *Key, hashes []common.Hash) error { return os.Rename(f.Name() /*old */, fName /* new */) } +// Prune all entries in the cache with a message number <= a specified value. +func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error { + // Define a regex pattern to extract the message number + numPruned := 0 + messageNumPattern := fmt.Sprintf(`%s-(\d+)-`, messageNumberPrefix) + pattern := regexp.MustCompile(messageNumPattern) + pathsToDelete := make([]string, 0) + if err := filepath.Walk(c.baseDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + matches := pattern.FindStringSubmatch(info.Name()) + if len(matches) > 1 { + dirNameMessageNum, err := strconv.Atoi(matches[1]) + if err != nil { + return err + } + // Delete the directory if the message number is <= the specified value. 
+ if dirNameMessageNum <= int(messageNumber) { + pathsToDelete = append(pathsToDelete, path) + } + } + } + return nil + }); err != nil { + return err + } + for _, path := range pathsToDelete { + if err := os.RemoveAll(path); err != nil { + return fmt.Errorf("could not prune directory with path %s: %w", path, err) + } + numPruned += 1 + } + log.Info("Pruned challenge cache", "numDirsPruned", numPruned, "messageNumber", messageNumPattern) + return nil +} + // Reads 32 bytes at a time from a reader up to a specified height. If none, then read all. func readHashes(r io.Reader, numToRead uint64) ([]common.Hash, error) { br := bufio.NewReader(r) diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go index 765b758571..c0d380fb12 100644 --- a/staker/challenge-cache/cache_test.go +++ b/staker/challenge-cache/cache_test.go @@ -74,6 +74,148 @@ func TestCache(t *testing.T) { } } +func TestPrune(t *testing.T) { + ctx := context.Background() + basePath := t.TempDir() + if err := os.MkdirAll(basePath, os.ModePerm); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := os.RemoveAll(basePath); err != nil { + t.Fatal(err) + } + }) + cache, err := New(basePath) + if err != nil { + t.Fatal(err) + } + if err = cache.Init(ctx); err != nil { + t.Fatal(err) + } + key := &Key{ + WavmModuleRoot: common.BytesToHash([]byte("foo")), + MessageHeight: 20, + StepHeights: []uint64{0}, + } + if _, err = cache.Get(key, 0); !errors.Is(err, ErrNotFoundInCache) { + t.Fatal(err) + } + t.Run("pruning non-existent dirs does nothing", func(t *testing.T) { + if err = cache.Prune(ctx, key.MessageHeight); err != nil { + t.Error(err) + } + }) + t.Run("pruning single item", func(t *testing.T) { + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + err = cache.Put(key, want) + if err != nil { + t.Fatal(err) + } + items, err := cache.Get(key, 3) + if err != nil { + t.Fatal(err) + } + if len(items) != len(want) { + t.Fatalf("Wrong number of hashes. Expected %d, got %d", len(want), len(items)) + } + if err = cache.Prune(ctx, key.MessageHeight); err != nil { + t.Error(err) + } + if _, err = cache.Get(key, 3); !errors.Is(err, ErrNotFoundInCache) { + t.Error(err) + } + }) + t.Run("does not prune items with message number > N", func(t *testing.T) { + want := []common.Hash{ + common.BytesToHash([]byte("foo")), + common.BytesToHash([]byte("bar")), + common.BytesToHash([]byte("baz")), + } + key.MessageHeight = 30 + err = cache.Put(key, want) + if err != nil { + t.Fatal(err) + } + items, err := cache.Get(key, 3) + if err != nil { + t.Fatal(err) + } + if len(items) != len(want) { + t.Fatalf("Wrong number of hashes. Expected %d, got %d", len(want), len(items)) + } + if err = cache.Prune(ctx, 20); err != nil { + t.Error(err) + } + items, err = cache.Get(key, 3) + if err != nil { + t.Fatal(err) + } + if len(items) != len(want) { + t.Fatalf("Wrong number of hashes. 
Expected %d, got %d", len(want), len(items))
+		}
+	})
+	t.Run("prunes many items with message number <= N", func(t *testing.T) {
+		moduleRoots := []common.Hash{
+			common.BytesToHash([]byte("foo")),
+			common.BytesToHash([]byte("bar")),
+			common.BytesToHash([]byte("baz")),
+		}
+		totalMessages := 10
+		for _, root := range moduleRoots {
+			for i := 0; i < totalMessages; i++ {
+				hashes := []common.Hash{
+					common.BytesToHash([]byte("a")),
+					common.BytesToHash([]byte("b")),
+					common.BytesToHash([]byte("c")),
+				}
+				key = &Key{
+					WavmModuleRoot: root,
+					MessageHeight:  uint64(i),
+					StepHeights:    []uint64{0},
+				}
+				if err = cache.Put(key, hashes); err != nil {
+					t.Fatal(err)
+				}
+			}
+		}
+		if err = cache.Prune(ctx, 5); err != nil {
+			t.Error(err)
+		}
+		for _, root := range moduleRoots {
+			// Expect that we deleted all entries with message number <= 5
+			for i := 0; i <= 5; i++ {
+				key = &Key{
+					WavmModuleRoot: root,
+					MessageHeight:  uint64(i),
+					StepHeights:    []uint64{0},
+				}
+				if _, err = cache.Get(key, 3); !errors.Is(err, ErrNotFoundInCache) {
+					t.Error(err)
+				}
+			}
+			// But also expect that we kept all entries with message number > 5
+			for i := 6; i < totalMessages; i++ {
+				key = &Key{
+					WavmModuleRoot: root,
+					MessageHeight:  uint64(i),
+					StepHeights:    []uint64{0},
+				}
+				items, err := cache.Get(key, 3)
+				if err != nil {
+					t.Error(err)
+				}
+				if len(items) != 3 {
+					t.Fatalf("Wrong number of hashes. Expected %d, got %d", 3, len(items))
+				}
+			}
+		}
+	})
+}
+
 func TestReadWriteStatehashes(t *testing.T) {
 	t.Run("read up to, but had empty reader", func(t *testing.T) {
 		b := bytes.NewBuffer([]byte{})

From 66114a56de4ff8fdf8dfe6ebec4b7cd47ecfbdd1 Mon Sep 17 00:00:00 2001
From: Raul Jordan
Date: Mon, 8 Jul 2024 09:41:40 -0500
Subject: [PATCH 29/69] commentary

---
 staker/challenge-cache/cache.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go
index f2f53738f7..720f71db44 100644
--- a/staker/challenge-cache/cache.go
+++ b/staker/challenge-cache/cache.go
@@ -201,7 +201,7 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error {
 				if err != nil {
 					return err
 				}
-				// Delete the directory if the message number is <= the specified value.
+				// Collect the directory path if the message number is <= the specified value.
 				if dirNameMessageNum <= int(messageNumber) {
 					pathsToDelete = append(pathsToDelete, path)
 				}
@@ -211,6 +211,8 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error {
 	if err != nil {
 		return err
 	}
+	// We delete separately from collecting the paths, as deleting while walking
+	// a dir can cause issues with the filepath.Walk function.
for _, path := range pathsToDelete { if err := os.RemoveAll(path); err != nil { return fmt.Errorf("could not prune directory with path %s: %w", path, err) From 2e1d30fe6cfc9e658c78462f52a8b27c1044ff64 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 8 Jul 2024 09:41:57 -0500 Subject: [PATCH 30/69] dir --- staker/challenge-cache/cache.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index 720f71db44..c2627247a1 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -22,19 +22,6 @@ Example uses: hashes.bin subchallenge-level-1-big-step-100/ hashes.bin - wavm-module-root-0xcd/ - message-num-53-rollup-block-hash-0x11.../ - hashes.bin - subchallenge-level-1-big-step-100/ - hashes.bin - message-num-69-rollup-block-hash-0x11.../ - hashes.bin - subchallenge-level-1-big-step-100/ - hashes.bin - message-num-70-rollup-block-hash-0x11.../ - hashes.bin - subchallenge-level-1-big-step-100/ - hashes.bin We namespace top-level block challenges by wavm module root. Then, we can retrieve the hashes for any data within a challenge or associated subchallenge based on the hierarchy above. From fc3402e43f3f5975fba15c26ed7d114bd09822c3 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 9 Jul 2024 11:25:36 +0200 Subject: [PATCH 31/69] Support different branches for submodule pin check --- .github/workflows/submodule-pin-check.sh | 22 ++++++++++++++++++++++ .github/workflows/submodule-pin-check.yml | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) create mode 100755 .github/workflows/submodule-pin-check.sh diff --git a/.github/workflows/submodule-pin-check.sh b/.github/workflows/submodule-pin-check.sh new file mode 100755 index 0000000000..a246e2e3e9 --- /dev/null +++ b/.github/workflows/submodule-pin-check.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +declare -Ar exceptions=( + [contracts]=origin/develop + [nitro-testnode]=origin/master +) + +divergent=0 +for mod in `git submodule --quiet foreach 'echo $name'`; do + branch=origin/HEAD + if [[ -v exceptions[$mod] ]]; then + branch=${exceptions[$mod]} + fi + + if ! 
git -C $mod merge-base --is-ancestor HEAD $branch; then + echo $mod diverges from $branch + divergent=1 + fi +done + +exit $divergent + diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index b37cc7b9d5..56dd2148f8 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -16,5 +16,5 @@ jobs: submodules: recursive - name: Check all submodules are under origin/HEAD - run: git submodule foreach git merge-base --is-ancestor HEAD origin/HEAD + run: ${{ github.workspace }}/.github/workflows/submodule-pin-check.sh From 919b73742cfd0c01faf7afee40d761a2c11b981c Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 9 Jul 2024 11:30:41 +0200 Subject: [PATCH 32/69] Fetch submodule branches --- .github/workflows/submodule-pin-check.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 56dd2148f8..e459bad34d 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -13,8 +13,9 @@ jobs: - name: Checkout uses: actions/checkout@v4 with: + fetch-depth: 0 submodules: recursive - - name: Check all submodules are under origin/HEAD + - name: Check all submodules are ancestors of origin/HEAD or configured branch run: ${{ github.workspace }}/.github/workflows/submodule-pin-check.sh From ce25064af57a4999f3854edc926454ef9e4da052 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 9 Jul 2024 19:10:57 -0600 Subject: [PATCH 33/69] Add consensus-v31 to Dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 37c1020a42..634500f8a5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -205,6 +205,7 @@ COPY ./scripts/download-machine.sh . #RUN ./download-machine.sh consensus-v11.1 0x68e4fe5023f792d4ef584796c84d710303a5e12ea02d6e37e2b5e9c4332507c4 #RUN ./download-machine.sh consensus-v20 0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4 RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b && true +RUN ./download-machine.sh consensus-v31 0x260f5fa5c3176a856893642e149cf128b5a8de9f828afec8d11184415dd8dc69 FROM golang:1.21.10-bookworm as node-builder WORKDIR /workspace From a464232191be272f487ec248fcd8d994a2131083 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 10 Jul 2024 13:25:26 +0200 Subject: [PATCH 34/69] Add exceptions for some arbitrator submodules --- .github/workflows/submodule-pin-check.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/submodule-pin-check.sh b/.github/workflows/submodule-pin-check.sh index a246e2e3e9..aecb287ce1 100755 --- a/.github/workflows/submodule-pin-check.sh +++ b/.github/workflows/submodule-pin-check.sh @@ -3,6 +3,10 @@ declare -Ar exceptions=( [contracts]=origin/develop [nitro-testnode]=origin/master + + #TODO Rachel to check these are the intended branches. 
+ [arbitrator/langs/c]=origin/vm-storage-cache + [arbitrator/tools/wasmer]=origin/adopt-v4.2.8 ) divergent=0 From dce50b0de0f0d53d5ac25ad029f6d1444b6d9f16 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Wed, 10 Jul 2024 22:33:43 +0530 Subject: [PATCH 35/69] Validate wasmModuleRoot while building docker --- Dockerfile | 3 +++ arbitrator/prover/src/main.rs | 15 +++++++++++++++ scripts/validate-wasm-module-root.sh | 16 ++++++++++++++++ 3 files changed, 34 insertions(+) create mode 100755 scripts/validate-wasm-module-root.sh diff --git a/Dockerfile b/Dockerfile index 37c1020a42..d7b4a5415b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -249,7 +249,10 @@ COPY --from=node-builder /workspace/target/bin/nitro /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/relay /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/nitro-val /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/seq-coordinator-manager /usr/local/bin/ +COPY --from=node-builder /workspace/target/bin/prover /usr/local/bin/ COPY --from=machine-versions /workspace/machines /home/user/target/machines +COPY ./scripts/validate-wasm-module-root.sh . +RUN ./validate-wasm-module-root.sh /home/user/target/machines /usr/local/bin/prover USER root RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index 9ddd5020c8..006458b597 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -41,6 +41,9 @@ struct Opts { #[structopt(long)] /// print modules to the console print_modules: bool, + #[structopt(long)] + /// print modules to the console + print_wasmmoduleroot: bool, /// profile output instead of generting proofs #[structopt(short = "p", long)] profile_run: bool, @@ -122,6 +125,18 @@ const DELAYED_HEADER_LEN: usize = 112; // also in test-case's host-io.rs & contr fn main() -> Result<()> { let opts = Opts::from_args(); + if opts.print_wasmmoduleroot { + match Machine::new_from_wavm(&opts.binary) { + Ok(mach) => { + println!("0x{}", mach.get_modules_root()); + return Ok(()); + }, + Err(err) => { + eprintln!("Error loading binary: {err}"); + return Err(err.into()); + } + } + } let mut inbox_contents = HashMap::default(); let mut inbox_position = opts.inbox_position; let mut delayed_position = opts.delayed_inbox_position; diff --git a/scripts/validate-wasm-module-root.sh b/scripts/validate-wasm-module-root.sh new file mode 100755 index 0000000000..4de0278da8 --- /dev/null +++ b/scripts/validate-wasm-module-root.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -e + +MACHINES_DIR=$1 +PROVER=$2 + +for machine in "$MACHINES_DIR"/*/ ; do + if [ -d "$machine" ]; then + expectedWasmModuleRoot=$(basename "$machine") + actualWasmModuleRoot=$(cd "$machine" && "$PROVER" machine.wavm.br --print-wasmmoduleroot) + if [ "$expectedWasmModuleRoot" != "$actualWasmModuleRoot" ]; then + echo "Error: Expected module root $expectedWasmModuleRoot but found $actualWasmModuleRoot in $machine" + exit 1 + fi + fi +done \ No newline at end of file From 867a1b13228da29fabe399a045e7ed4e09e02d02 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Wed, 10 Jul 2024 22:46:16 +0530 Subject: [PATCH 36/69] fix --- scripts/validate-wasm-module-root.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validate-wasm-module-root.sh b/scripts/validate-wasm-module-root.sh index 4de0278da8..5a7dd32abb 100755 --- a/scripts/validate-wasm-module-root.sh +++ b/scripts/validate-wasm-module-root.sh @@ -6,7 +6,7 @@ PROVER=$2 for machine 
in "$MACHINES_DIR"/*/ ; do if [ -d "$machine" ]; then - expectedWasmModuleRoot=$(basename "$machine") + expectedWasmModuleRoot=$(cat "$machine/module-root.txt") actualWasmModuleRoot=$(cd "$machine" && "$PROVER" machine.wavm.br --print-wasmmoduleroot) if [ "$expectedWasmModuleRoot" != "$actualWasmModuleRoot" ]; then echo "Error: Expected module root $expectedWasmModuleRoot but found $actualWasmModuleRoot in $machine" From 8daee6e3dbf3e17b951ffa62b59be25bb8cf1874 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Wed, 10 Jul 2024 16:49:50 -0500 Subject: [PATCH 37/69] Produce a more descriptive error when validation server url is not set to a WS/WSS server --- staker/stateless_block_validator.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 1cf3d7a4c3..61ace07cb5 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "net/url" "testing" "github.com/offchainlabs/nitro/arbstate/daprovider" @@ -428,8 +429,13 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error { return fmt.Errorf("starting execution spawner: %w", err) } } - for _, spawner := range v.execSpawners { + for i, spawner := range v.execSpawners { if err := spawner.Start(ctx_in); err != nil { + if u, parseErr := url.Parse(v.config.ValidationServerConfigs[i].URL); parseErr == nil { + if u.Scheme == "http" || u.Scheme == "https" { + return fmt.Errorf("validation server's url scheme: %s is unsupported, it should either be ws or wss, err: %w", u.Scheme, err) + } + } return err } } From 725baeffbe1376bde833d082df82fbffe2e27245 Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Thu, 11 Jul 2024 10:21:24 +0800 Subject: [PATCH 38/69] refactor: rename and set the default value to 0 for log query batch size --- staker/l1_validator.go | 2 +- staker/rollup_watcher.go | 4 ++-- staker/staker.go | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 14c46b5ddb..dd9673ee0b 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -381,7 +381,7 @@ func (v *L1Validator) generateNodeAction( return nil, false, nil } - successorNodes, err := v.rollup.LookupNodeChildren(ctx, stakerInfo.LatestStakedNode, stakerConfig.LogQueryRange, stakerInfo.LatestStakedNodeHash) + successorNodes, err := v.rollup.LookupNodeChildren(ctx, stakerInfo.LatestStakedNode, stakerConfig.LogQueryBatchSize, stakerInfo.LatestStakedNodeHash) if err != nil { return nil, false, fmt.Errorf("error looking up node %v (hash %v) children: %w", stakerInfo.LatestStakedNode, stakerInfo.LatestStakedNodeHash, err) } diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 4ff8db0ce3..2c6841eb49 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -166,7 +166,7 @@ func (r *RollupWatcher) LookupNode(ctx context.Context, number uint64) (*NodeInf }, nil } -func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, logQueryRange uint64, nodeHash common.Hash) ([]*NodeInfo, error) { +func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, logQueryRangeSize uint64, nodeHash common.Hash) ([]*NodeInfo, error) { node, err := r.RollupUserLogic.GetNode(r.getCallOpts(ctx), nodeNum) if err != nil { return nil, err @@ -193,7 +193,7 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, // break down the query to avoid 
eth_getLogs query limit for toBlock.Cmp(fromBlock) > 0 { query.FromBlock = fromBlock - query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRange))) + query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRangeSize))) if query.ToBlock.Cmp(toBlock) > 0 { query.ToBlock = toBlock } diff --git a/staker/staker.go b/staker/staker.go index f7f412ca16..6d3b246230 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -90,7 +90,7 @@ type L1ValidatorConfig struct { ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` Dangerous DangerousConfig `koanf:"dangerous"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` - LogQueryRange uint64 `koanf:"log-query-range" reload:"hot"` + LogQueryBatchSize uint64 `koanf:"log-query-batch-size" reload:"hot"` strategy StakerStrategy gasRefunder common.Address @@ -157,7 +157,7 @@ var DefaultL1ValidatorConfig = L1ValidatorConfig{ ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, - LogQueryRange: 999, + LogQueryBatchSize: 0, } var TestL1ValidatorConfig = L1ValidatorConfig{ @@ -178,7 +178,7 @@ var TestL1ValidatorConfig = L1ValidatorConfig{ ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, - LogQueryRange: 999, + LogQueryBatchSize: 999, } var DefaultValidatorL1WalletConfig = genericconf.WalletConfig{ @@ -204,7 +204,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") - f.Uint64(prefix+".log-query-range", DefaultL1ValidatorConfig.LogQueryRange, "range ro query from eth_getLogs") + f.Uint64(prefix+".log-query-range", DefaultL1ValidatorConfig.LogQueryBatchSize, "range ro query from eth_getLogs") dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) From 0433d001bcee54de25fedcba0a51eb8bfb4963e5 Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Thu, 11 Jul 2024 10:23:54 +0800 Subject: [PATCH 39/69] chore: rename command name --- staker/staker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/staker.go b/staker/staker.go index 6d3b246230..a924660b7e 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -204,7 +204,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") - f.Uint64(prefix+".log-query-range", DefaultL1ValidatorConfig.LogQueryBatchSize, "range ro query from eth_getLogs") + f.Uint64(prefix+".log-query-range-size", DefaultL1ValidatorConfig.LogQueryBatchSize, "range ro query from eth_getLogs") dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, 
dataposter.DefaultDataPosterConfigForValidator) DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) From e89e283295a6bcaad610fe1ee11b4fded1160627 Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Thu, 11 Jul 2024 10:30:57 +0800 Subject: [PATCH 40/69] chore: set the default value to 0 for TestL1ValidatorConfig --- staker/staker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/staker.go b/staker/staker.go index a924660b7e..5bc96f5192 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -178,7 +178,7 @@ var TestL1ValidatorConfig = L1ValidatorConfig{ ExtraGas: 50000, Dangerous: DefaultDangerousConfig, ParentChainWallet: DefaultValidatorL1WalletConfig, - LogQueryBatchSize: 999, + LogQueryBatchSize: 0, } var DefaultValidatorL1WalletConfig = genericconf.WalletConfig{ From d22da37b44e6fb29bce333a08885497994a1762f Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Thu, 11 Jul 2024 12:01:17 +0800 Subject: [PATCH 41/69] fix: query all requested logs while logQueryRangeSize is 0 --- staker/rollup_watcher.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 2c6841eb49..b35bebd1c6 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -193,7 +193,11 @@ func (r *RollupWatcher) LookupNodeChildren(ctx context.Context, nodeNum uint64, // break down the query to avoid eth_getLogs query limit for toBlock.Cmp(fromBlock) > 0 { query.FromBlock = fromBlock - query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRangeSize))) + if logQueryRangeSize == 0 { + query.ToBlock = toBlock + } else { + query.ToBlock = new(big.Int).Add(fromBlock, big.NewInt(int64(logQueryRangeSize))) + } if query.ToBlock.Cmp(toBlock) > 0 { query.ToBlock = toBlock } From 4781fdfa0c550c51a944ce7090330e749c2d022b Mon Sep 17 00:00:00 2001 From: xiaohuo Date: Thu, 11 Jul 2024 12:11:51 +0800 Subject: [PATCH 42/69] fix: subcommand name for LogQueryBatchSize --- staker/staker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/staker.go b/staker/staker.go index 5bc96f5192..24f5dc61e3 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -204,7 +204,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") - f.Uint64(prefix+".log-query-range-size", DefaultL1ValidatorConfig.LogQueryBatchSize, "range ro query from eth_getLogs") + f.Uint64(prefix+".log-query-batch-size", DefaultL1ValidatorConfig.LogQueryBatchSize, "range ro query from eth_getLogs") dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) From 8f2dc2ec62cd763cf5fd5b0919c9f02c365ddfaf Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 10 Jul 2024 22:50:47 -0600 Subject: [PATCH 43/69] Switch merge checks action to pull_request_target event --- .github/workflows/merge-checks.yml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge-checks.yml b/.github/workflows/merge-checks.yml index b729df2b26..6561c429e2 100644 --- a/.github/workflows/merge-checks.yml +++ b/.github/workflows/merge-checks.yml @@ -1,7 +1,7 @@ name: Merge Checks on: - pull_request: + pull_request_target: branches: [ master ] types: [synchronize, opened, reopened, labeled, unlabeled] From 164dd6546ead76e06b6d204a11ee77e1e9b633e8 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 10:49:54 +0530 Subject: [PATCH 44/69] Metrics for total gas used for eth_call and total gas used for eth_estimateGas --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index e35bf9cdd3..b96ece4f84 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit e35bf9cdd3d02034ac1be34a479d101f12012ba6 +Subproject commit b96ece4f8463a8689e6e7898ab754b776fe23da0 From f77bb8dd70702d6667b8dd45c6dc8051b7e41786 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 10:53:41 +0530 Subject: [PATCH 45/69] update module --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index b96ece4f84..c186780e82 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit b96ece4f8463a8689e6e7898ab754b776fe23da0 +Subproject commit c186780e82856be27d1576db4d1fd79984562165 From 73be2edc259a64d3b0d8f08ec95afd89caf8eb3f Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 10:59:46 +0530 Subject: [PATCH 46/69] Changes based on PR comments --- .github/workflows/ci.yml | 2 +- system_tests/fees_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 02ef915f16..7fea04de4b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -145,7 +145,7 @@ jobs: if: matrix.test-mode == 'defaults' run: | packages=`go list ./...` - stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... -timeout 20m -parallel=8 -tags=feestest > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") + stdbuf -oL gotestsum --format short-verbose --packages="$packages" --rerun-fails=1 --no-color=false -- ./... -coverprofile=coverage.txt -covermode=atomic -coverpkg=./...,./go-ethereum/... 
-timeout 20m -parallel=8 -tags=cionly > >(stdbuf -oL tee full.log | grep -vE "INFO|seal") - name: run tests with race detection if: matrix.test-mode == 'race' diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 2a53e42a23..ccca82e009 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -4,8 +4,8 @@ // these tests seems to consume too much memory with race detection // Test randomly fails with L1 gas price estimate should tend toward the basefee // so skipping locally, but running on CI -//go:build !race && feestest -// +build !race,feestest +//go:build !race && cionly +// +build !race,cionly package arbtest From c8602b39cb5b5f45a0a09741ec553edabe0bd53a Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 11:04:17 +0530 Subject: [PATCH 47/69] fix --- arbitrator/prover/src/main.rs | 2 +- scripts/validate-wasm-module-root.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index 006458b597..2d09d30cea 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -42,7 +42,7 @@ struct Opts { /// print modules to the console print_modules: bool, #[structopt(long)] - /// print modules to the console + /// print wasm module root to the console print_wasmmoduleroot: bool, /// profile output instead of generting proofs #[structopt(short = "p", long)] diff --git a/scripts/validate-wasm-module-root.sh b/scripts/validate-wasm-module-root.sh index 5a7dd32abb..f62a46a8d7 100755 --- a/scripts/validate-wasm-module-root.sh +++ b/scripts/validate-wasm-module-root.sh @@ -13,4 +13,4 @@ for machine in "$MACHINES_DIR"/*/ ; do exit 1 fi fi -done \ No newline at end of file +done From 3692cce6585f31df46e474d145054e18dfd985d1 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 11:19:33 +0530 Subject: [PATCH 48/69] fix --- arbitrator/prover/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index 2d09d30cea..0f4d19bbaf 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -133,7 +133,7 @@ fn main() -> Result<()> { }, Err(err) => { eprintln!("Error loading binary: {err}"); - return Err(err.into()); + return Err(err); } } } From dfe1fa78337ad447aa8891675dc41f9570b8dd66 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 12:07:18 +0530 Subject: [PATCH 49/69] fix --- arbitrator/prover/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbitrator/prover/src/main.rs b/arbitrator/prover/src/main.rs index 0f4d19bbaf..e10a415389 100644 --- a/arbitrator/prover/src/main.rs +++ b/arbitrator/prover/src/main.rs @@ -130,7 +130,7 @@ fn main() -> Result<()> { Ok(mach) => { println!("0x{}", mach.get_modules_root()); return Ok(()); - }, + } Err(err) => { eprintln!("Error loading binary: {err}"); return Err(err); From 14ba36cc1c70973bbc7062b9da5e4b5bfc93d5d2 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 11 Jul 2024 11:34:37 +0200 Subject: [PATCH 50/69] Move TxLookupLimit override into ParseNode Overriding it outside of ParseNode, which is called when reloading the config via LiveConfig, causes a mismatch in TxLookupLimit which is not hot reloadable. 
--- cmd/nitro/nitro.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 572e6d2f06..088cec01fa 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -369,11 +369,6 @@ func mainImpl() int { return 0 } - if nodeConfig.Execution.Caching.Archive && nodeConfig.Execution.TxLookupLimit != 0 { - log.Info("retaining ability to lookup full transaction history as archive mode is enabled") - nodeConfig.Execution.TxLookupLimit = 0 - } - if err := resourcemanager.Init(&nodeConfig.Node.ResourceMgmt); err != nil { flag.Usage() log.Crit("Failed to start resource management module", "err", err) @@ -914,6 +909,12 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa if nodeConfig.Execution.Caching.Archive { nodeConfig.Node.MessagePruner.Enable = false } + + if nodeConfig.Execution.Caching.Archive && nodeConfig.Execution.TxLookupLimit != 0 { + log.Info("retaining ability to lookup full transaction history as archive mode is enabled") + nodeConfig.Execution.TxLookupLimit = 0 + } + err = nodeConfig.Validate() if err != nil { return nil, nil, err From fc3783a9698dbdd537a8c50bacc72565b1402be2 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 21:41:26 +0530 Subject: [PATCH 51/69] Test EstimateRetryableTicket With No Funds And Zero GasPrice --- system_tests/retryable_test.go | 42 ++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 1abf9f2162..a5f18f6303 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -131,6 +131,48 @@ func TestRetryableNoExist(t *testing.T) { } } +func TestEstimateRetryableTicketWithNoFundsAndZeroGas(t *testing.T) { + t.Parallel() + builder, _, _, ctx, teardown := retryableSetup(t) + defer teardown() + + user2Address := builder.L2Info.GetAddress("User2") + beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary") + + deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12)) + callValue := big.NewInt(1e6) + + nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) + Require(t, err, "failed to deploy NodeInterface") + + // estimate the gas needed to auto redeem the retryable + builder.L2Info.GenerateAccount("zerofunds") + usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("zerofunds", ctx) + usertxoptsL2.NoSend = true + usertxoptsL2.GasMargin = 0 + usertxoptsL2.GasPrice = big.NewInt(0) + tx, err := nodeInterface.EstimateRetryableTicket( + &usertxoptsL2, + usertxoptsL2.From, + deposit, + user2Address, + callValue, + beneficiaryAddress, + beneficiaryAddress, + []byte{0x32, 0x42, 0x32, 0x88}, // increase the cost to beyond that of params.TxGas + ) + Require(t, err, "failed to estimate retryable submission") + estimate := tx.Gas() + expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4 + if float64(estimate) > float64(expectedEstimate)*(1+gasestimator.EstimateGasErrorRatio) { + t.Errorf("estimated retryable ticket at %v gas but expected %v, with error margin of %v", + estimate, + expectedEstimate, + gasestimator.EstimateGasErrorRatio, + ) + } +} + func TestSubmitRetryableImmediateSuccess(t *testing.T) { t.Parallel() builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t) From a656df40b0d5ff3339e1725850476bbac0e43f68 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 21:41:40 +0530 Subject: [PATCH 52/69] fix --- system_tests/retryable_test.go | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index a5f18f6303..ba0eb37578 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -131,7 +131,7 @@ func TestRetryableNoExist(t *testing.T) { } } -func TestEstimateRetryableTicketWithNoFundsAndZeroGas(t *testing.T) { +func TestEstimateRetryableTicketWithNoFundsAndZeroGasPrice(t *testing.T) { t.Parallel() builder, _, _, ctx, teardown := retryableSetup(t) defer teardown() From a5892d858f9d74242b7cce2be62a006a9ffe13a1 Mon Sep 17 00:00:00 2001 From: Aman Sanghi Date: Thu, 11 Jul 2024 21:44:33 +0530 Subject: [PATCH 53/69] fix --- system_tests/retryable_test.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index ba0eb37578..268d59ac98 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -145,13 +145,12 @@ func TestEstimateRetryableTicketWithNoFundsAndZeroGasPrice(t *testing.T) { nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) Require(t, err, "failed to deploy NodeInterface") - // estimate the gas needed to auto redeem the retryable builder.L2Info.GenerateAccount("zerofunds") usertxoptsL2 := builder.L2Info.GetDefaultTransactOpts("zerofunds", ctx) usertxoptsL2.NoSend = true usertxoptsL2.GasMargin = 0 usertxoptsL2.GasPrice = big.NewInt(0) - tx, err := nodeInterface.EstimateRetryableTicket( + _, err = nodeInterface.EstimateRetryableTicket( &usertxoptsL2, usertxoptsL2.From, deposit, @@ -159,18 +158,9 @@ func TestEstimateRetryableTicketWithNoFundsAndZeroGasPrice(t *testing.T) { callValue, beneficiaryAddress, beneficiaryAddress, - []byte{0x32, 0x42, 0x32, 0x88}, // increase the cost to beyond that of params.TxGas + []byte{}, ) Require(t, err, "failed to estimate retryable submission") - estimate := tx.Gas() - expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4 - if float64(estimate) > float64(expectedEstimate)*(1+gasestimator.EstimateGasErrorRatio) { - t.Errorf("estimated retryable ticket at %v gas but expected %v, with error margin of %v", - estimate, - expectedEstimate, - gasestimator.EstimateGasErrorRatio, - ) - } } func TestSubmitRetryableImmediateSuccess(t *testing.T) { From bca0114226d2973fd1ab31d6e3f7b2313b6e6d40 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 11 Jul 2024 11:26:29 -0500 Subject: [PATCH 54/69] address PR comments --- staker/stateless_block_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 61ace07cb5..e263e1c402 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -433,7 +433,7 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error { if err := spawner.Start(ctx_in); err != nil { if u, parseErr := url.Parse(v.config.ValidationServerConfigs[i].URL); parseErr == nil { if u.Scheme == "http" || u.Scheme == "https" { - return fmt.Errorf("validation server's url scheme: %s is unsupported, it should either be ws or wss, err: %w", u.Scheme, err) + return fmt.Errorf("validation server's url scheme is unsupported, it should either be ws or wss, url:%s err: %w", v.config.ValidationServerConfigs[i].URL, err) } } return err From 11de4f95a2f715081b0eaad2ed0f8fae45c11fd4 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 11 Jul 2024 15:02:23 -0600 Subject: [PATCH 
55/69] Add check for after-next-version label to design approved status --- .github/workflows/merge-checks.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/merge-checks.yml b/.github/workflows/merge-checks.yml index 6561c429e2..28b29d5cec 100644 --- a/.github/workflows/merge-checks.yml +++ b/.github/workflows/merge-checks.yml @@ -1,7 +1,7 @@ name: Merge Checks on: - pull_request_target: + pull_request: branches: [ master ] types: [synchronize, opened, reopened, labeled, unlabeled] @@ -17,7 +17,7 @@ jobs: run: | set -x pipefail status_state="pending" - if ${{ contains(github.event.*.labels.*.name, 'design-approved') }}; then + if ${{ contains(github.event.pull_request.labels.*.name, 'design-approved') && !contains(github.event.pull_request.labels.*.name, 'after-next-version') }}; then status_state="success" else resp="$(curl -sSL --fail-with-body \ From 13433ce94961182ac586d86190cb56c90b6132e2 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 11 Jul 2024 15:04:08 -0600 Subject: [PATCH 56/69] Undo change for testing --- .github/workflows/merge-checks.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge-checks.yml b/.github/workflows/merge-checks.yml index 28b29d5cec..c9f7957389 100644 --- a/.github/workflows/merge-checks.yml +++ b/.github/workflows/merge-checks.yml @@ -1,7 +1,7 @@ name: Merge Checks on: - pull_request: + pull_request_target: branches: [ master ] types: [synchronize, opened, reopened, labeled, unlabeled] From 1d5ba38978d47bbd9fcffc5dee2e6e1fd6b3f904 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Fri, 12 Jul 2024 12:22:54 -0300 Subject: [PATCH 57/69] Rename BuildL2WithL1 to BuildL2OnL1 --- system_tests/common_test.go | 4 ++-- system_tests/das_test.go | 6 +++--- system_tests/full_challenge_impl_test.go | 2 +- system_tests/nodeinterface_test.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 7797db9745..9294e7794e 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -218,7 +218,7 @@ func (b *NodeBuilder) Build(t *testing.T) func() { b.CheckConfig(t) if b.withL1 { b.BuildL1(t) - return b.BuildL2WithL1(t) + return b.BuildL2OnL1(t) } return b.BuildL2(t) } @@ -257,7 +257,7 @@ func (b *NodeBuilder) BuildL1(t *testing.T) { b.L1.cleanup = func() { requireClose(t, b.L1.Stack) } } -func (b *NodeBuilder) BuildL2WithL1(t *testing.T) func() { +func (b *NodeBuilder) BuildL2OnL1(t *testing.T) func() { if b.L1 == nil { panic("must build L1 before building L2") } diff --git a/system_tests/das_test.go b/system_tests/das_test.go index a37aca520e..76ec2d7493 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -122,7 +122,7 @@ func TestDASRekey(t *testing.T) { // Setup L2 chain builder.L2Info.GenerateAccount("User2") - builder.BuildL2WithL1(t) + builder.BuildL2OnL1(t) // Setup second node l1NodeConfigB.BlockValidator.Enable = false @@ -154,7 +154,7 @@ func TestDASRekey(t *testing.T) { // Restart the node on the new keyset against the new DAS server running on the same disk as the first with new keys builder.nodeConfig.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) builder.l2StackConfig = createStackConfigForTest(builder.dataDir) - cleanup := builder.BuildL2WithL1(t) + cleanup := builder.BuildL2OnL1(t) defer cleanup() nodeBParams := SecondNodeParams{ @@ -276,7 +276,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { // Setup L2 chain 
builder.L2Info = NewArbTestInfo(t, builder.chainConfig.ChainID) builder.L2Info.GenerateAccount("User2") - cleanup := builder.BuildL2WithL1(t) + cleanup := builder.BuildL2OnL1(t) defer cleanup() // Create node to sync from chain diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 47d1b7c3c5..bf95c390ba 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -281,7 +281,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterRollupAddresses.Bridge = asserterBridgeAddr asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr - cleanup := builder.BuildL2WithL1(t) + cleanup := builder.BuildL2OnL1(t) defer cleanup() asserterL2 := builder.L2.ConsensusNode asserterL2Info := builder.L2Info diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go index f1665e1d7c..17bfb18892 100644 --- a/system_tests/nodeinterface_test.go +++ b/system_tests/nodeinterface_test.go @@ -41,7 +41,7 @@ func TestFindBatch(t *testing.T) { builder.addresses.Bridge = bridgeAddr builder.addresses.SequencerInbox = seqInboxAddr - cleanup := builder.BuildL2WithL1(t) + cleanup := builder.BuildL2OnL1(t) defer cleanup() nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client) From ea1864177406169a38e358a84af1d15e33ac6462 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Fri, 12 Jul 2024 12:24:05 -0300 Subject: [PATCH 58/69] Use t.Fatal instead of panic --- system_tests/common_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 9294e7794e..9a54e504d0 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -259,7 +259,7 @@ func (b *NodeBuilder) BuildL1(t *testing.T) { func (b *NodeBuilder) BuildL2OnL1(t *testing.T) func() { if b.L1 == nil { - panic("must build L1 before building L2") + t.Fatal("must build L1 before building L2") } b.L2 = NewTestClient(b.ctx) From 783f12f9877c89421fe57652e61c463a871d32af Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 12 Jul 2024 16:21:28 -0500 Subject: [PATCH 59/69] address PR comments --- staker/stateless_block_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index e263e1c402..ec235c4bf5 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -432,7 +432,7 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error { for i, spawner := range v.execSpawners { if err := spawner.Start(ctx_in); err != nil { if u, parseErr := url.Parse(v.config.ValidationServerConfigs[i].URL); parseErr == nil { - if u.Scheme == "http" || u.Scheme == "https" { + if u.Scheme != "ws" && u.Scheme != "wss" { return fmt.Errorf("validation server's url scheme is unsupported, it should either be ws or wss, url:%s err: %w", v.config.ValidationServerConfigs[i].URL, err) } } From cbdf31fe9d5e071272a5e4bbdfc05af6cef1c711 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 12 Jul 2024 15:28:07 -0600 Subject: [PATCH 60/69] Fix disabling P2P --- cmd/nitro-val/nitro_val.go | 3 +++ cmd/nitro/nitro.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 6f5f546430..1a7d2e6283 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -70,6 +70,9 @@ func 
mainImpl() int { nodeConfig.WS.Apply(&stackConf) nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) + stackConf.P2P.ListenAddr = "" + stackConf.P2P.NoDial = true + stackConf.P2P.NoDiscovery = true vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = strippedRevision diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 04bdeb3228..a4ef1b2945 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -183,6 +183,9 @@ func mainImpl() int { if nodeConfig.WS.ExposeAll { stackConf.WSModules = append(stackConf.WSModules, "personal") } + stackConf.P2P.ListenAddr = "" + stackConf.P2P.NoDial = true + stackConf.P2P.NoDiscovery = true vcsRevision, strippedRevision, vcsTime := confighelpers.GetVersion() stackConf.Version = strippedRevision From a42a7a152402c5438b04934039b8ce684dfa1d4c Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 12 Jul 2024 16:51:19 -0500 Subject: [PATCH 61/69] Update scopeContext's stack to have correct contract address while tracing for ethCalls --- arbos/programs/api.go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/arbos/programs/api.go b/arbos/programs/api.go index 65a58a47c2..ed43751a54 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -137,16 +137,29 @@ func newApiClosures( startGas := am.SaturatingUSub(gasLeft, baseCost) * 63 / 64 gas := am.MinInt(startGas, gasReq) - // Tracing: emit the call (value transfer is done later in evm.Call) - if tracingInfo != nil { - tracingInfo.Tracer.CaptureState(0, opcode, startGas, baseCost+gas, scope, []byte{}, depth, nil) - } - // EVM rule: calls that pay get a stipend (opCall) if value.Sign() != 0 { gas = am.SaturatingUAdd(gas, params.CallStipend) } + // Tracing: emit the call (value transfer is done later in evm.Call) + if tracingInfo != nil { + s := &vm.ScopeContext{ + Memory: vm.NewMemory(), + Stack: util.TracingStackFromArgs( + *uint256.NewInt(gas), // gas + *uint256.NewInt(0).SetBytes(contract.Bytes()), // to address + *uint256.NewInt(0).SetBytes(value.Bytes()), // call value + *uint256.NewInt(0), // memory offset + *uint256.NewInt(uint64(len(input))), // memory length + *uint256.NewInt(0), // return offset + *uint256.NewInt(0), // return size + ), + Contract: scope.Contract, + } + tracingInfo.Tracer.CaptureState(0, opcode, startGas, baseCost+gas, s, []byte{}, depth, nil) + } + var ret []byte var returnGas uint64 From ead2b2412730abae7b8262cddf8e89ea44ba2f35 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 14 Jul 2024 23:54:16 -0600 Subject: [PATCH 62/69] Fix "sequencer batches out of order" log --- arbnode/sequencer_inbox.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index 46e1edb78b..73e52ded53 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -232,7 +232,7 @@ func (i *SequencerInbox) LookupBatchesInRange(ctx context.Context, from, to *big seqNum := parsedLog.BatchSequenceNumber.Uint64() if lastSeqNum != nil { if seqNum != *lastSeqNum+1 { - return nil, fmt.Errorf("sequencer batches out of order; after batch %v got batch %v", lastSeqNum, seqNum) + return nil, fmt.Errorf("sequencer batches out of order; after batch %v got batch %v", *lastSeqNum, seqNum) } } lastSeqNum = &seqNum From 15a636c7e17897cea03a1f96d900668b3ee43bdf Mon Sep 17 00:00:00 2001 From: a-thomas-22 Date: Mon, 15 Jul 2024 09:29:44 -0500 Subject: [PATCH 63/69] Replace - with _ in device names --- util/iostat/iostat.go | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-) diff --git a/util/iostat/iostat.go b/util/iostat/iostat.go index 9bc5ff800c..01a63fdfe1 100644 --- a/util/iostat/iostat.go +++ b/util/iostat/iostat.go @@ -30,7 +30,8 @@ func RegisterAndPopulateMetrics(ctx context.Context, spawnInterval, maxDeviceCou if _, ok := deviceMetrics[stat.DeviceName]; !ok { // Register metrics for a maximum of maxDeviceCount (fail safe incase iostat command returns incorrect names indefinitely) if len(deviceMetrics) < maxDeviceCount { - baseMetricName := fmt.Sprintf("isotat/%s/", stat.DeviceName) + sanitizedDeviceName := strings.Replace(stat.DeviceName, "-", "_", -1) + baseMetricName := fmt.Sprintf("isotat/%s/", sanitizedDeviceName) deviceMetrics[stat.DeviceName] = make(map[string]metrics.GaugeFloat64) deviceMetrics[stat.DeviceName]["readspersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"readspersecond", nil) deviceMetrics[stat.DeviceName]["writespersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"writespersecond", nil) From b8b8d6195909ad507972c508bdbf47653340c4eb Mon Sep 17 00:00:00 2001 From: a-thomas-22 Date: Mon, 15 Jul 2024 09:34:40 -0500 Subject: [PATCH 64/69] add coment --- util/iostat/iostat.go | 1 + 1 file changed, 1 insertion(+) diff --git a/util/iostat/iostat.go b/util/iostat/iostat.go index 01a63fdfe1..3db27e9423 100644 --- a/util/iostat/iostat.go +++ b/util/iostat/iostat.go @@ -30,6 +30,7 @@ func RegisterAndPopulateMetrics(ctx context.Context, spawnInterval, maxDeviceCou if _, ok := deviceMetrics[stat.DeviceName]; !ok { // Register metrics for a maximum of maxDeviceCount (fail safe incase iostat command returns incorrect names indefinitely) if len(deviceMetrics) < maxDeviceCount { + // Replace hyphens with underscores to avoid metric name issues sanitizedDeviceName := strings.Replace(stat.DeviceName, "-", "_", -1) baseMetricName := fmt.Sprintf("isotat/%s/", sanitizedDeviceName) deviceMetrics[stat.DeviceName] = make(map[string]metrics.GaugeFloat64) From cf2387435cfd70ef7670b52bd02cab852f5944b0 Mon Sep 17 00:00:00 2001 From: a-thomas-22 Date: Mon, 15 Jul 2024 09:36:29 -0500 Subject: [PATCH 65/69] fix metric name --- util/iostat/iostat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/iostat/iostat.go b/util/iostat/iostat.go index 3db27e9423..d401da761e 100644 --- a/util/iostat/iostat.go +++ b/util/iostat/iostat.go @@ -32,7 +32,7 @@ func RegisterAndPopulateMetrics(ctx context.Context, spawnInterval, maxDeviceCou if len(deviceMetrics) < maxDeviceCount { // Replace hyphens with underscores to avoid metric name issues sanitizedDeviceName := strings.Replace(stat.DeviceName, "-", "_", -1) - baseMetricName := fmt.Sprintf("isotat/%s/", sanitizedDeviceName) + baseMetricName := fmt.Sprintf("iostat/%s/", sanitizedDeviceName) deviceMetrics[stat.DeviceName] = make(map[string]metrics.GaugeFloat64) deviceMetrics[stat.DeviceName]["readspersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"readspersecond", nil) deviceMetrics[stat.DeviceName]["writespersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"writespersecond", nil) From 423c966f4599b1ef5ec3a92157c0bc754f3cff6e Mon Sep 17 00:00:00 2001 From: a-thomas-22 Date: Mon, 15 Jul 2024 09:51:42 -0500 Subject: [PATCH 66/69] lint fix --- util/iostat/iostat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/iostat/iostat.go b/util/iostat/iostat.go index d401da761e..342a44100e 100644 --- a/util/iostat/iostat.go +++ b/util/iostat/iostat.go @@ -31,7 +31,7 @@ func RegisterAndPopulateMetrics(ctx 
context.Context, spawnInterval, maxDeviceCou // Register metrics for a maximum of maxDeviceCount (fail safe incase iostat command returns incorrect names indefinitely) if len(deviceMetrics) < maxDeviceCount { // Replace hyphens with underscores to avoid metric name issues - sanitizedDeviceName := strings.Replace(stat.DeviceName, "-", "_", -1) + sanitizedDeviceName := strings.ReplaceAll(stat.DeviceName, "-", "_") baseMetricName := fmt.Sprintf("iostat/%s/", sanitizedDeviceName) deviceMetrics[stat.DeviceName] = make(map[string]metrics.GaugeFloat64) deviceMetrics[stat.DeviceName]["readspersecond"] = metrics.NewRegisteredGaugeFloat64(baseMetricName+"readspersecond", nil) From d7a545218ac7051b97d2ee74054a6209c0fb14e8 Mon Sep 17 00:00:00 2001 From: Pepper Lebeck-Jobe Date: Mon, 15 Jul 2024 17:43:52 +0200 Subject: [PATCH 67/69] [Metric Changes] Fix a typo omitting an "i" The word "Conditional" is spelled with two "i"s but prior to this commit contained only one in each of the metric paths. Related to: https://linear.app/offchain-labs/issue/NIT-1367/fix-missing-i-in-metric-arbtxprecheckercondtionaltx --- execution/gethexec/sequencer.go | 4 ++-- execution/gethexec/tx_pre_checker.go | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 2bace9b677..9fbf687677 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -52,8 +52,8 @@ var ( nonceFailureCacheOverflowCounter = metrics.NewRegisteredGauge("arb/sequencer/noncefailurecache/overflow", nil) blockCreationTimer = metrics.NewRegisteredTimer("arb/sequencer/block/creation", nil) successfulBlocksCounter = metrics.NewRegisteredCounter("arb/sequencer/block/successful", nil) - conditionalTxRejectedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/condtionaltx/rejected", nil) - conditionalTxAcceptedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/condtionaltx/accepted", nil) + conditionalTxRejectedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/conditionaltx/rejected", nil) + conditionalTxAcceptedBySequencerCounter = metrics.NewRegisteredCounter("arb/sequencer/conditionaltx/accepted", nil) l1GasPriceGauge = metrics.NewRegisteredGauge("arb/sequencer/l1gasprice", nil) callDataUnitsBacklogGauge = metrics.NewRegisteredGauge("arb/sequencer/calldataunitsbacklog", nil) unusedL1GasChargeGauge = metrics.NewRegisteredGauge("arb/sequencer/unusedl1gascharge", nil) diff --git a/execution/gethexec/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go index 1a48d75fda..dacfd32e81 100644 --- a/execution/gethexec/tx_pre_checker.go +++ b/execution/gethexec/tx_pre_checker.go @@ -23,10 +23,10 @@ import ( ) var ( - conditionalTxRejectedByTxPreCheckerCurrentStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/condtionaltx/currentstate/rejected", nil) - conditionalTxAcceptedByTxPreCheckerCurrentStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/condtionaltx/currentstate/accepted", nil) - conditionalTxRejectedByTxPreCheckerOldStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/condtionaltx/oldstate/rejected", nil) - conditionalTxAcceptedByTxPreCheckerOldStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/condtionaltx/oldstate/accepted", nil) + conditionalTxRejectedByTxPreCheckerCurrentStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/conditionaltx/currentstate/rejected", nil) + conditionalTxAcceptedByTxPreCheckerCurrentStateCounter = 
metrics.NewRegisteredCounter("arb/txprechecker/conditionaltx/currentstate/accepted", nil) + conditionalTxRejectedByTxPreCheckerOldStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/conditionaltx/oldstate/rejected", nil) + conditionalTxAcceptedByTxPreCheckerOldStateCounter = metrics.NewRegisteredCounter("arb/txprechecker/conditionaltx/oldstate/accepted", nil) ) const TxPreCheckerStrictnessNone uint = 0 From 256d27ecf0ee6ce6aa33c7b4b22ea17ade58dab3 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 15 Jul 2024 10:47:11 -0500 Subject: [PATCH 68/69] make scopecontext stack accurate towards delegate and static calls --- arbos/programs/api.go | 22 ++++++++++++---------- arbos/util/tracing.go | 5 +---- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/arbos/programs/api.go b/arbos/programs/api.go index ed43751a54..0519cd9f01 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -144,17 +144,19 @@ func newApiClosures( // Tracing: emit the call (value transfer is done later in evm.Call) if tracingInfo != nil { + var args []uint256.Int + args = append(args, *uint256.NewInt(gas)) // gas + args = append(args, *uint256.NewInt(0).SetBytes(contract.Bytes())) // to address + if opcode == vm.CALL { + args = append(args, *uint256.NewInt(0).SetBytes(value.Bytes())) // call value + } + args = append(args, *uint256.NewInt(0)) // memory offset + args = append(args, *uint256.NewInt(uint64(len(input)))) // memory length + args = append(args, *uint256.NewInt(0)) // return offset + args = append(args, *uint256.NewInt(0)) // return size s := &vm.ScopeContext{ - Memory: vm.NewMemory(), - Stack: util.TracingStackFromArgs( - *uint256.NewInt(gas), // gas - *uint256.NewInt(0).SetBytes(contract.Bytes()), // to address - *uint256.NewInt(0).SetBytes(value.Bytes()), // call value - *uint256.NewInt(0), // memory offset - *uint256.NewInt(uint64(len(input))), // memory length - *uint256.NewInt(0), // return offset - *uint256.NewInt(0), // return size - ), + Memory: util.TracingMemoryFromBytes(input), + Stack: util.TracingStackFromArgs(args...), Contract: scope.Contract, } tracingInfo.Tracer.CaptureState(0, opcode, startGas, baseCost+gas, s, []byte{}, depth, nil) diff --git a/arbos/util/tracing.go b/arbos/util/tracing.go index f0f101bc20..f3564143c5 100644 --- a/arbos/util/tracing.go +++ b/arbos/util/tracing.go @@ -56,11 +56,8 @@ func (info *TracingInfo) RecordEmitLog(topics []common.Hash, data []byte) { for _, topic := range topics { args = append(args, HashToUint256(topic)) // topic: 32-byte value. 
Max topics count is 4 } - memory := vm.NewMemory() - memory.Resize(size) - memory.Set(0, size, data) scope := &vm.ScopeContext{ - Memory: memory, + Memory: TracingMemoryFromBytes(data), Stack: TracingStackFromArgs(args...), Contract: info.Contract, } From 27298fe5a9f87796607f594e3bc48ffc00bc7c52 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Mon, 15 Jul 2024 11:11:25 -0500 Subject: [PATCH 69/69] test pass --- staker/challenge-cache/cache.go | 8 +++++++- staker/challenge-cache/cache_test.go | 24 ++++++------------------ 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/staker/challenge-cache/cache.go b/staker/challenge-cache/cache.go index c2627247a1..ed4fad6450 100644 --- a/staker/challenge-cache/cache.go +++ b/staker/challenge-cache/cache.go @@ -177,7 +177,10 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error { messageNumPattern := fmt.Sprintf(`%s-(\d+)-`, messageNumberPrefix) pattern := regexp.MustCompile(messageNumPattern) pathsToDelete := make([]string, 0) - if err := filepath.Walk(c.baseDir, func(path string, info os.FileInfo, err error) error { + if err := filepath.WalkDir(c.baseDir, func(path string, info os.DirEntry, err error) error { + if ctx.Err() != nil { + return ctx.Err() + } if err != nil { return err } @@ -201,6 +204,9 @@ func (c *Cache) Prune(ctx context.Context, messageNumber uint64) error { // We delete separately from collecting the paths, as deleting while walking // a dir can cause issues with the filepath.Walk function. for _, path := range pathsToDelete { + if ctx.Err() != nil { + return ctx.Err() + } if err := os.RemoveAll(path); err != nil { return fmt.Errorf("could not prune directory with path %s: %w", path, err) } diff --git a/staker/challenge-cache/cache_test.go b/staker/challenge-cache/cache_test.go index c0d380fb12..af0a058f78 100644 --- a/staker/challenge-cache/cache_test.go +++ b/staker/challenge-cache/cache_test.go @@ -18,16 +18,12 @@ import ( var _ HistoryCommitmentCacher = (*Cache)(nil) func TestCache(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() basePath := t.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { t.Fatal(err) } - t.Cleanup(func() { - if err := os.RemoveAll(basePath); err != nil { - t.Fatal(err) - } - }) cache, err := New(basePath) if err != nil { t.Fatal(err) @@ -75,16 +71,12 @@ func TestCache(t *testing.T) { } func TestPrune(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() basePath := t.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { t.Fatal(err) } - t.Cleanup(func() { - if err := os.RemoveAll(basePath); err != nil { - t.Fatal(err) - } - }) cache, err := New(basePath) if err != nil { t.Fatal(err) @@ -429,17 +421,13 @@ func Test_determineFilePath(t *testing.T) { } func BenchmarkCache_Read_32Mb(b *testing.B) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() b.StopTimer() basePath := os.TempDir() if err := os.MkdirAll(basePath, os.ModePerm); err != nil { b.Fatal(err) } - b.Cleanup(func() { - if err := os.RemoveAll(basePath); err != nil { - b.Fatal(err) - } - }) cache, err := New(basePath) if err != nil { b.Fatal(err)