diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 6192f65a4e..33049d4396 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -59,6 +59,17 @@ jobs:
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
 
+      - name: Start background nitro-testnode
+        shell: bash
+        run: |
+          cd nitro-testnode
+          ./test-node.bash --init --dev &
+
+      - name: Wait for rpc to come up
+        shell: bash
+        run: |
+          ${{ github.workspace }}/.github/workflows/waitForNitro.sh
+
       - name: Print WAVM module root
         id: module-root
         run: |
diff --git a/.github/workflows/waitForNitro.sh b/.github/workflows/waitForNitro.sh
new file mode 100755
index 0000000000..e196b38d88
--- /dev/null
+++ b/.github/workflows/waitForNitro.sh
@@ -0,0 +1,10 @@
+# poll the nitro endpoint until we get a 0 return code
+while true
+do
+    curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547'
+    if [ "$?" -eq "0" ]; then
+        exit 0
+    else
+        sleep 20
+    fi
+done
\ No newline at end of file
diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go
index c82e45fbee..72e4ba2887 100644
--- a/arbnode/inbox_tracker.go
+++ b/arbnode/inbox_tracker.go
@@ -15,7 +15,9 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
+
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
 	"github.com/offchainlabs/nitro/arbstate"
 	"github.com/offchainlabs/nitro/arbutil"
@@ -24,6 +26,11 @@ import (
 	"github.com/offchainlabs/nitro/util/containers"
 )
 
+var (
+	inboxLatestBatchGauge        = metrics.NewRegisteredGauge("arb/inbox/latest/batch", nil)
+	inboxLatestBatchMessageGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch/message", nil)
+)
+
 type InboxTracker struct {
 	db         ethdb.Database
 	txStreamer *TransactionStreamer
@@ -676,6 +683,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L
 		"l1Block", latestL1Block,
 		"l1Timestamp", time.Unix(int64(latestTimestamp), 0),
 	)
+	inboxLatestBatchGauge.Update(int64(pos))
+	inboxLatestBatchMessageGauge.Update(int64(newMessageCount))
 
 	if t.validator != nil {
 		t.validator.ReorgToBatchCount(startPos)
diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go
index bef0f83d1f..f874b5d71e 100644
--- a/cmd/nitro/init.go
+++ b/cmd/nitro/init.go
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"math/big"
 	"os"
+	"reflect"
 	"regexp"
 	"runtime"
 	"strings"
@@ -296,7 +297,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node
 		return nil, err
 	}
 	if initConfig.Prune == "validator" {
-		if l1Client == nil {
+		if l1Client == nil || reflect.ValueOf(l1Client).IsNil() {
 			return nil, errors.New("an L1 connection is required for validator pruning")
 		}
 		callOpts := bind.CallOpts{
diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index 285cc3fe86..80b21e5ebe 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -61,7 +61,10 @@ import (
 )
 
 func printSampleUsage(name string) {
-	fmt.Printf("Sample usage: %s --help \n", name)
+	fmt.Printf("Sample usage: %s [OPTIONS] \n\n", name)
+	fmt.Printf("Options:\n")
+	fmt.Printf("  --help\n")
+	fmt.Printf("  --dev: Start a default L2-only dev chain\n")
 }
 
 func addUnlockWallet(accountManager *accounts.Manager, walletConf *genericconf.WalletConfig) (common.Address, error) {
diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go
index 18a2b10f2f..6116a492c9 100644
--- a/cmd/util/confighelpers/configuration.go
+++ b/cmd/util/confighelpers/configuration.go
@@ -138,10 +138,32 @@ func PrintErrorAndExit(err error, usage func(string)) {
 	}
 }
 
+func devFlagArgs() []string {
+	args := []string{
+		"--init.dev-init",
+		"--init.dev-init-address", "0x3f1Eae7D46d88F08fc2F8ed27FCb2AB183EB2d0E",
+		"--node.dangerous.no-l1-listener",
+		"--node.parent-chain-reader.enable=false",
+		"--parent-chain.id=1337",
+		"--chain.id=412346",
+		"--persistent.chain", "/tmp/dev-test",
+		"--node.sequencer",
+		"--node.dangerous.no-sequencer-coordinator",
+		"--node.staker.enable=false",
+		"--init.empty=false",
+		"--http.port", "8547",
+		"--http.addr", "127.0.0.1",
+	}
+	return args
+}
+
 func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) {
 	for _, arg := range args {
 		if arg == "--version" || arg == "-v" {
 			return nil, ErrVersion
+		} else if arg == "--dev" {
+			args = devFlagArgs()
+			break
 		}
 	}
 	if err := f.Parse(args); err != nil {
diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go
index 1068dda967..ede27b26bc 100644
--- a/execution/gethexec/node.go
+++ b/execution/gethexec/node.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
 	"sync/atomic"
 	"testing"
 
@@ -73,6 +74,7 @@ func (c *Config) Validate() error {
 func ConfigAddOptions(prefix string, f *flag.FlagSet) {
 	arbitrum.ConfigAddOptions(prefix+".rpc", f)
 	SequencerConfigAddOptions(prefix+".sequencer", f)
+	headerreader.AddOptions(prefix+".parent-chain-reader", f)
 	arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f)
 	f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)")
 	AddOptionsForNodeForwarderConfig(prefix+".forwarder", f)
@@ -85,6 +87,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) {
 var ConfigDefault = Config{
 	RPC:               arbitrum.DefaultConfig,
 	Sequencer:         DefaultSequencerConfig,
+	ParentChainReader: headerreader.DefaultConfig,
 	RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig,
 	ForwardingTarget:  "",
 	TxPreChecker:      DefaultTxPreCheckerConfig,
@@ -96,6 +99,7 @@ var ConfigDefault = Config{
 
 func ConfigDefaultNonSequencerTest() *Config {
 	config := ConfigDefault
+	config.ParentChainReader = headerreader.Config{}
 	config.Sequencer.Enable = false
 	config.Forwarder = DefaultTestForwarderConfig
 	config.ForwardingTarget = "null"
@@ -107,6 +111,7 @@ func ConfigDefaultNonSequencerTest() *Config {
 
 func ConfigDefaultTest() *Config {
 	config := ConfigDefault
+	config.ParentChainReader = headerreader.Config{}
 	config.Sequencer = TestSequencerConfig
 	config.ForwardingTarget = "null"
 
@@ -149,7 +154,7 @@ func CreateExecutionNode(
 	var sequencer *Sequencer
 
 	var parentChainReader *headerreader.HeaderReader
-	if l1client != nil {
+	if l1client != nil && !reflect.ValueOf(l1client).IsNil() {
 		arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
 		parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys)
 		if err != nil {
diff --git a/nitro-testnode b/nitro-testnode
index 7ad12c0f1b..aee6ceff9c 160000
--- a/nitro-testnode
+++ b/nitro-testnode
@@ -1 +1 @@
-Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e
+Subproject commit aee6ceff9c9d3fb2749da55a7d7842f23d1bfc8e
diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go
index 4ea2a16c07..8c0de8c6db 100644
--- a/system_tests/batch_poster_test.go
+++ b/system_tests/batch_poster_test.go
@@ -16,7 +16,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 
 	"github.com/offchainlabs/nitro/arbnode"
-	"github.com/offchainlabs/nitro/execution/gethexec"
 	"github.com/offchainlabs/nitro/util/redisutil"
 )
 
@@ -46,52 +45,52 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
 		parallelBatchPosters = 4
 	}
 
-	conf := arbnode.ConfigDefaultL1Test()
-	conf.BatchPoster.Enable = false
-	conf.BatchPoster.RedisUrl = redisUrl
-	l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil)
-	defer requireClose(t, l1stack)
-	defer nodeA.StopAndWait()
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder.nodeConfig.BatchPoster.Enable = false
+	builder.nodeConfig.BatchPoster.RedisUrl = redisUrl
+	cleanup := builder.Build(t)
+	defer cleanup()
+	l1A, l2A := builder.L1, builder.L2
 
-	l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil)
-	defer nodeB.StopAndWait()
+	l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{})
+	defer cleanup2nd()
 
-	l2info.GenerateAccount("User2")
+	builder.L2Info.GenerateAccount("User2")
 
 	var txs []*types.Transaction
 
 	for i := 0; i < 100; i++ {
-		tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil)
+		tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
 		txs = append(txs, tx)
 
-		err := l2clientA.SendTransaction(ctx, tx)
+		err := l2A.Client.SendTransaction(ctx, tx)
 		Require(t, err)
 	}
 
 	for _, tx := range txs {
-		_, err := EnsureTxSucceeded(ctx, l2clientA, tx)
+		_, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
 		Require(t, err)
 	}
 
 	firstTxData, err := txs[0].MarshalBinary()
 	Require(t, err)
-	seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx)
-	conf.BatchPoster.Enable = true
-	conf.BatchPoster.MaxSize = len(firstTxData) * 2
-	startL1Block, err := l1client.BlockNumber(ctx)
+	seqTxOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx)
+	builder.nodeConfig.BatchPoster.Enable = true
+	builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2
+	startL1Block, err := l1A.Client.BlockNumber(ctx)
 	Require(t, err)
 	for i := 0; i < parallelBatchPosters; i++ {
 		// Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race
-		batchPosterConfig := conf.BatchPoster
+		batchPosterConfig := builder.nodeConfig.BatchPoster
 		batchPoster, err := arbnode.NewBatchPoster(ctx,
 			&arbnode.BatchPosterOpts{
 				DataPosterDB: nil,
-				L1Reader:     nodeA.L1Reader,
-				Inbox:        nodeA.InboxTracker,
-				Streamer:     nodeA.TxStreamer,
-				SyncMonitor:  nodeA.SyncMonitor,
+				L1Reader:     l2A.ConsensusNode.L1Reader,
+				Inbox:        l2A.ConsensusNode.InboxTracker,
+				Streamer:     l2A.ConsensusNode.TxStreamer,
+				SyncMonitor:  l2A.ConsensusNode.SyncMonitor,
 				Config:       func() *arbnode.BatchPosterConfig { return &batchPosterConfig },
-				DeployInfo:   nodeA.DeployInfo,
+				DeployInfo:   l2A.ConsensusNode.DeployInfo,
 				TransactOpts: &seqTxOpts,
 				DAWriter:     nil,
 			},
@@ -103,11 +102,11 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
 
 	lastTxHash := txs[len(txs)-1].Hash()
 	for i := 90; i > 0; i-- {
-		SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{
-			l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
+		SendWaitTestTransactions(t, ctx, l1A.Client, []*types.Transaction{
+			builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
 		})
 		time.Sleep(500 * time.Millisecond)
-		_, err := l2clientB.TransactionReceipt(ctx, lastTxHash)
+		_, err := l2B.Client.TransactionReceipt(ctx, lastTxHash)
 		if err == nil {
 			break
 		}
@@ -122,9 +121,9 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
 	// However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl.
 	if false {
 		// Make sure the batch poster is able to post multiple batches in one block
-		endL1Block, err := l1client.BlockNumber(ctx)
+		endL1Block, err := l1A.Client.BlockNumber(ctx)
 		Require(t, err)
-		seqInbox, err := arbnode.NewSequencerInbox(l1client, nodeA.DeployInfo.SequencerInbox, 0)
+		seqInbox, err := arbnode.NewSequencerInbox(l1A.Client, l2A.ConsensusNode.DeployInfo.SequencerInbox, 0)
 		Require(t, err)
 		batches, err := seqInbox.LookupBatchesInRange(ctx, new(big.Int).SetUint64(startL1Block), new(big.Int).SetUint64(endL1Block))
 		Require(t, err)
@@ -144,7 +143,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
 		}
 	}
 
-	l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
+	l2balance, err := l2B.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
 	Require(t, err)
 
 	if l2balance.Sign() == 0 {
@@ -157,26 +156,26 @@ func TestBatchPosterLargeTx(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	conf := gethexec.ConfigDefaultTest()
-	conf.Sequencer.MaxTxDataSize = 110000
-	l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, conf, nil, nil)
-	defer requireClose(t, l1stack)
-	defer nodeA.StopAndWait()
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder.execConfig.Sequencer.MaxTxDataSize = 110000
+	cleanup := builder.Build(t)
+	defer cleanup()
+	l2A := builder.L2
 
-	l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil)
-	defer nodeB.StopAndWait()
+	l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{})
+	defer cleanup2nd()
 
 	data := make([]byte, 100000)
 	_, err := rand.Read(data)
 	Require(t, err)
-	faucetAddr := l2info.GetAddress("Faucet")
-	gas := l2info.TransferGas + 20000*uint64(len(data))
-	tx := l2info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data)
-	err = l2clientA.SendTransaction(ctx, tx)
+	faucetAddr := builder.L2Info.GetAddress("Faucet")
+	gas := builder.L2Info.TransferGas + 20000*uint64(len(data))
+	tx := builder.L2Info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data)
+	err = l2A.Client.SendTransaction(ctx, tx)
 	Require(t, err)
-	receiptA, err := EnsureTxSucceeded(ctx, l2clientA, tx)
+	receiptA, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
 	Require(t, err)
-	receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2clientB, tx, time.Second*30)
+	receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2B.Client, tx, time.Second*30)
 	Require(t, err)
 	if receiptA.BlockHash != receiptB.BlockHash {
 		Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash)
@@ -188,26 +187,25 @@ func TestBatchPosterKeepsUp(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	conf := arbnode.ConfigDefaultL1Test()
-	conf.BatchPoster.CompressionLevel = brotli.BestCompression
-	conf.BatchPoster.MaxDelay = time.Hour
-	execConf := gethexec.ConfigDefaultTest()
-	execConf.RPC.RPCTxFeeCap = 1000.
-	l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, execConf, nil, nil)
-	defer requireClose(t, l1stack)
-	defer nodeA.StopAndWait()
-	l2info.GasPrice = big.NewInt(100e9)
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+	builder.nodeConfig.BatchPoster.CompressionLevel = brotli.BestCompression
+	builder.nodeConfig.BatchPoster.MaxDelay = time.Hour
+	builder.execConfig.RPC.RPCTxFeeCap = 1000.
+	cleanup := builder.Build(t)
+	defer cleanup()
+	l2A := builder.L2
+	builder.L2Info.GasPrice = big.NewInt(100e9)
 
 	go func() {
 		data := make([]byte, 90000)
 		_, err := rand.Read(data)
 		Require(t, err)
 		for {
-			gas := l2info.TransferGas + 20000*uint64(len(data))
-			tx := l2info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data)
-			err = l2clientA.SendTransaction(ctx, tx)
+			gas := builder.L2Info.TransferGas + 20000*uint64(len(data))
+			tx := builder.L2Info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data)
+			err = l2A.Client.SendTransaction(ctx, tx)
 			Require(t, err)
-			_, err := EnsureTxSucceeded(ctx, l2clientA, tx)
+			_, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
 			Require(t, err)
 		}
 	}()
@@ -215,11 +213,11 @@ func TestBatchPosterKeepsUp(t *testing.T) {
 	start := time.Now()
 	for {
 		time.Sleep(time.Second)
-		batches, err := nodeA.InboxTracker.GetBatchCount()
+		batches, err := l2A.ConsensusNode.InboxTracker.GetBatchCount()
 		Require(t, err)
-		postedMessages, err := nodeA.InboxTracker.GetBatchMessageCount(batches - 1)
+		postedMessages, err := l2A.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1)
 		Require(t, err)
-		haveMessages, err := nodeA.TxStreamer.GetMessageCount()
+		haveMessages, err := l2A.ConsensusNode.TxStreamer.GetMessageCount()
 		Require(t, err)
 		duration := time.Since(start)
 		fmt.Printf("batches posted: %v over %v (%.2f batches/second)\n", batches, duration, float64(batches)/(float64(duration)/float64(time.Second)))
diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go
index 14c42f6a2f..9079fd35f1 100644
--- a/system_tests/bloom_test.go
+++ b/system_tests/bloom_test.go
@@ -17,7 +17,6 @@ import (
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/offchainlabs/nitro/execution/gethexec"
 	"github.com/offchainlabs/nitro/solgen/go/mocksgen"
 )
 
@@ -25,17 +24,19 @@ func TestBloom(t *testing.T) {
 	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	execconfig := gethexec.ConfigDefaultTest()
-	execconfig.RPC.BloomBitsBlocks = 256
-	execconfig.RPC.BloomConfirms = 1
-	l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, execconfig, false)
-	defer node.StopAndWait()
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+	builder.execConfig.RPC.BloomBitsBlocks = 256
+	builder.execConfig.RPC.BloomConfirms = 1
+	builder.takeOwnership = false
+	cleanup := builder.Build(t)
 
-	l2info.GenerateAccount("User2")
+	defer cleanup()
 
-	ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx)
+	builder.L2Info.GenerateAccount("User2")
+
+	ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
 	ownerTxOpts.Context = ctx
-	_, simple := deploySimple(t, ctx, ownerTxOpts, client)
+	_, simple := deploySimple(t, ctx, ownerTxOpts, builder.L2.Client)
 	simpleABI, err := mocksgen.SimpleMetaData.GetAbi()
 	Require(t, err)
 
@@ -63,7 +64,7 @@
 		if sendNullEvent {
 			tx, err = simple.EmitNullEvent(&ownerTxOpts)
 			Require(t, err)
-			_, err = EnsureTxSucceeded(ctx, client, tx)
+			_, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx)
 			Require(t, err)
 		}
 
@@ -74,15 +75,14 @@
 			tx, err = simple.Increment(&ownerTxOpts)
 		}
 		Require(t, err)
-		_, err = EnsureTxSucceeded(ctx, client, tx)
+		_, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx)
 		Require(t, err)
 		if i%100 == 0 {
 			t.Log("counts: ", i, "/", countsNum)
 		}
 	}
-	execNode := getExecNode(t, node)
 	for {
-		sectionSize, sectionNum := execNode.Backend.APIBackend().BloomStatus()
+		sectionSize, sectionNum := builder.L2.ExecNode.Backend.APIBackend().BloomStatus()
 		if sectionSize != 256 {
 			Fatal(t, "unexpected section size: ", sectionSize)
 		}
@@ -92,14 +92,14 @@
 		}
 		<-time.After(time.Second)
 	}
-	lastHeader, err := client.HeaderByNumber(ctx, nil)
+	lastHeader, err := builder.L2.Client.HeaderByNumber(ctx, nil)
 	Require(t, err)
 	nullEventQuery := ethereum.FilterQuery{
 		FromBlock: big.NewInt(0),
 		ToBlock:   lastHeader.Number,
 		Topics:    [][]common.Hash{{simpleABI.Events["NullEvent"].ID}},
 	}
-	logs, err := client.FilterLogs(ctx, nullEventQuery)
+	logs, err := builder.L2.Client.FilterLogs(ctx, nullEventQuery)
 	Require(t, err)
 	if len(logs) != len(nullEventCounts) {
 		Fatal(t, "expected ", len(nullEventCounts), " logs, got ", len(logs))
@@ -107,7 +107,7 @@
 	incrementEventQuery := ethereum.FilterQuery{
 		Topics: [][]common.Hash{{simpleABI.Events["CounterEvent"].ID}},
 	}
-	logs, err = client.FilterLogs(ctx, incrementEventQuery)
+	logs, err = builder.L2.Client.FilterLogs(ctx, incrementEventQuery)
 	Require(t, err)
 	if len(logs) != len(eventCounts) {
 		Fatal(t, "expected ", len(eventCounts), " logs, got ", len(logs))
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 19357c5b79..d233631d4c 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -8,7 +8,6 @@ import (
 	"context"
 	"encoding/hex"
 	"encoding/json"
-	"fmt"
 	"math/big"
 	"net"
 	"os"
@@ -60,6 +59,167 @@ import (
 type info = *BlockchainTestInfo
 type client = arbutil.L1Interface
 
+type SecondNodeParams struct {
+	nodeConfig  *arbnode.Config
+	execConfig  *gethexec.Config
+	stackConfig *node.Config
+	dasConfig   *das.DataAvailabilityConfig
+	initData    *statetransfer.ArbosInitializationInfo
+}
+
+type TestClient struct {
+	ctx           context.Context
+	Client        *ethclient.Client
+	L1Backend     *eth.Ethereum
+	Stack         *node.Node
+	ConsensusNode *arbnode.Node
+	ExecNode      *gethexec.ExecutionNode
+
+	// having cleanup() field makes cleanup customizable from default cleanup methods after calling build
+	cleanup func()
+}
+
+func NewTestClient(ctx context.Context) *TestClient {
+	return &TestClient{ctx: ctx}
+}
+
+func (tc *TestClient) SendSignedTx(t *testing.T, l2Client *ethclient.Client, transaction *types.Transaction, lInfo info) *types.Receipt {
+	return SendSignedTxViaL1(t, tc.ctx, lInfo, tc.Client, l2Client, transaction)
+}
+
+func (tc *TestClient) SendUnsignedTx(t *testing.T, l2Client *ethclient.Client, transaction *types.Transaction, lInfo info) *types.Receipt {
+	return SendUnsignedTxViaL1(t, tc.ctx, lInfo, tc.Client, l2Client, transaction)
+}
+
+func (tc *TestClient) TransferBalance(t *testing.T, from string, to string, amount *big.Int, lInfo info) (*types.Transaction, *types.Receipt) {
+	return TransferBalanceTo(t, from, lInfo.GetAddress(to), amount, lInfo, tc.Client, tc.ctx)
+}
+
+func (tc *TestClient) TransferBalanceTo(t *testing.T, from string, to common.Address, amount *big.Int, lInfo info) (*types.Transaction, *types.Receipt) {
+	return TransferBalanceTo(t, from, to, amount, lInfo, tc.Client, tc.ctx)
+}
+
+func (tc *TestClient) GetBalance(t *testing.T, account common.Address) *big.Int {
+	return GetBalance(t, tc.ctx, tc.Client, account)
+}
+
+func (tc *TestClient) GetBaseFeeAt(t *testing.T, blockNum *big.Int) *big.Int {
+	return GetBaseFeeAt(t, tc.Client, tc.ctx, blockNum)
+}
+
+func (tc *TestClient) SendWaitTestTransactions(t *testing.T, txs []*types.Transaction) {
+	SendWaitTestTransactions(t, tc.ctx, tc.Client, txs)
+}
+
+func (tc *TestClient) DeploySimple(t *testing.T, auth bind.TransactOpts) (common.Address, *mocksgen.Simple) {
+	return deploySimple(t, tc.ctx, auth, tc.Client)
+}
+
+type NodeBuilder struct {
+	// NodeBuilder configuration
+	ctx           context.Context
+	chainConfig   *params.ChainConfig
+	nodeConfig    *arbnode.Config
+	execConfig    *gethexec.Config
+	l1StackConfig *node.Config
+	l2StackConfig *node.Config
+	L1Info        info
+	L2Info        info
+
+	// L1, L2 Node parameters
+	dataDir       string
+	isSequencer   bool
+	takeOwnership bool
+	withL1        bool
+
+	// Created nodes
+	L1 *TestClient
+	L2 *TestClient
+}
+
+func NewNodeBuilder(ctx context.Context) *NodeBuilder {
+	return &NodeBuilder{ctx: ctx}
+}
+
+func (b *NodeBuilder) DefaultConfig(t *testing.T, withL1 bool) *NodeBuilder {
+	// most used values across current tests are set here as default
+	b.withL1 = withL1
+	if withL1 {
+		b.isSequencer = true
+		b.nodeConfig = arbnode.ConfigDefaultL1Test()
+	} else {
+		b.takeOwnership = true
+		b.nodeConfig = arbnode.ConfigDefaultL2Test()
+	}
+	b.chainConfig = params.ArbitrumDevTestChainConfig()
+	b.L1Info = NewL1TestInfo(t)
+	b.L2Info = NewArbTestInfo(t, b.chainConfig.ChainID)
+	b.dataDir = t.TempDir()
+	b.l1StackConfig = createStackConfigForTest(b.dataDir)
+	b.l2StackConfig = createStackConfigForTest(b.dataDir)
+	b.execConfig = gethexec.ConfigDefaultTest()
+	return b
+}
+
+func (b *NodeBuilder) Build(t *testing.T) func() {
+	if b.withL1 {
+		l1, l2 := NewTestClient(b.ctx), NewTestClient(b.ctx)
+		b.L2Info, l2.ConsensusNode, l2.Client, l2.Stack, b.L1Info, l1.L1Backend, l1.Client, l1.Stack =
+			createTestNodeOnL1WithConfigImpl(t, b.ctx, b.isSequencer, b.nodeConfig, b.execConfig, b.chainConfig, b.l2StackConfig, b.L2Info)
+		b.L1, b.L2 = l1, l2
+		b.L1.cleanup = func() { requireClose(t, b.L1.Stack) }
+	} else {
+		l2 := NewTestClient(b.ctx)
+		b.L2Info, l2.ConsensusNode, l2.Client =
+			CreateTestL2WithConfig(t, b.ctx, b.L2Info, b.nodeConfig, b.execConfig, b.takeOwnership)
+		b.L2 = l2
+	}
+	b.L2.ExecNode = getExecNode(t, b.L2.ConsensusNode)
+	b.L2.cleanup = func() { b.L2.ConsensusNode.StopAndWait() }
+	return func() {
+		b.L2.cleanup()
+		if b.L1 != nil && b.L1.cleanup != nil {
+			b.L1.cleanup()
+		}
+	}
+}
+
+func (b *NodeBuilder) Build2ndNode(t *testing.T, params *SecondNodeParams) (*TestClient, func()) {
+	if b.L2 == nil {
+		t.Fatal("builder did not previously build a L2 Node")
+	}
+	if b.withL1 && b.L1 == nil {
+		t.Fatal("builder did not previously build a L1 Node")
+	}
+	if params.nodeConfig == nil {
+		params.nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest()
+	}
+	if params.dasConfig != nil {
+		params.nodeConfig.DataAvailability = *params.dasConfig
+	}
+	if params.stackConfig == nil {
+		params.stackConfig = b.l2StackConfig
+		// should use different dataDir from the previously used ones
+		params.stackConfig.DataDir = t.TempDir()
+	}
+	if params.initData == nil {
+		params.initData = &b.L2Info.ArbInitData
+	}
+	if params.execConfig == nil {
+		params.execConfig = b.execConfig
+	}
+
+	l2 := NewTestClient(b.ctx)
+	l2.Client, l2.ConsensusNode =
+		Create2ndNodeWithConfig(t, b.ctx, b.L2.ConsensusNode, b.L1.Stack, b.L1Info, params.initData, params.nodeConfig, params.execConfig, params.stackConfig)
+	l2.cleanup = func() { l2.ConsensusNode.StopAndWait() }
+	return l2, func() { l2.cleanup() }
+}
+
+func (b *NodeBuilder) BridgeBalance(t *testing.T, account string, amount *big.Int) (*types.Transaction, *types.Receipt) {
+	return BridgeBalance(t, account, amount, b.L1Info, b.L2Info, b.L1.Client, b.L2.Client, b.ctx)
+}
+
 func SendWaitTestTransactions(t *testing.T, ctx context.Context, client client, txs []*types.Transaction) {
 	t.Helper()
 	for _, tx := range txs {
@@ -290,33 +450,19 @@ func createTestL1BlockChain(t *testing.T, l1info info) (info, *ethclient.Client,
 	return createTestL1BlockChainWithConfig(t, l1info, nil)
 }
 
-func stackConfigForTest(t *testing.T) *node.Config {
-	stackConfig := node.DefaultConfig
-	stackConfig.HTTPPort = 0
-	stackConfig.WSPort = 0
-	stackConfig.UseLightweightKDF = true
-	stackConfig.P2P.ListenAddr = ""
-	stackConfig.P2P.NoDial = true
-	stackConfig.P2P.NoDiscovery = true
-	stackConfig.P2P.NAT = nil
-	stackConfig.DataDir = t.TempDir()
-	return &stackConfig
-}
-
-func createDefaultStackForTest(dataDir string) (*node.Node, error) {
+func createStackConfigForTest(dataDir string) *node.Config {
 	stackConf := node.DefaultConfig
-	var err error
 	stackConf.DataDir = dataDir
+	stackConf.UseLightweightKDF = true
+	stackConf.WSPort = 0
+	stackConf.HTTPPort = 0
 	stackConf.HTTPHost = ""
 	stackConf.HTTPModules = append(stackConf.HTTPModules, "eth")
 	stackConf.P2P.NoDiscovery = true
+	stackConf.P2P.NoDial = true
 	stackConf.P2P.ListenAddr = ""
-
-	stack, err := node.New(&stackConf)
-	if err != nil {
-		return nil, fmt.Errorf("error creating protocol stack: %w", err)
-	}
-	return stack, nil
+	stackConf.P2P.NAT = nil
+	return &stackConf
 }
 
 func createTestValidationNode(t *testing.T, ctx context.Context, config *valnode.Config) (*valnode.ValidationNode, *node.Node) {
@@ -392,7 +538,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no
 		l1info = NewL1TestInfo(t)
 	}
 	if stackConfig == nil {
-		stackConfig = stackConfigForTest(t)
+		stackConfig = createStackConfigForTest(t.TempDir())
 	}
 
 	l1info.GenerateAccount("Faucet")
@@ -513,12 +659,10 @@ func createL2BlockChainWithStackConfig(
 	var stack *node.Node
 	var err error
 	if stackConfig == nil {
-		stack, err = createDefaultStackForTest(dataDir)
-		Require(t, err)
-	} else {
-		stack, err = node.New(stackConfig)
-		Require(t, err)
+		stackConfig = createStackConfigForTest(dataDir)
 	}
+	stack, err = node.New(stackConfig)
+	Require(t, err)
 
 	chainDb, err := stack.OpenDatabase("chaindb", 0, 0, "", false)
 	Require(t, err)
@@ -773,7 +917,7 @@ func Create2ndNodeWithConfig(
 	l1client := ethclient.NewClient(l1rpcClient)
 
 	if stackConfig == nil {
-		stackConfig = stackConfigForTest(t)
+		stackConfig = createStackConfigForTest(t.TempDir())
 	}
 	l2stack, err := node.New(stackConfig)
 	Require(t, err)
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index 8c1588273b..c7dd177ab8 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -19,6 +19,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethclient"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/params"
 
 	"github.com/offchainlabs/nitro/arbnode"
@@ -171,7 +172,8 @@ func TestDASRekey(t *testing.T) {
 
 	// Restart the node on the new keyset against the new DAS server running on the same disk as the first with new keys
 
-	l2stackA, err := createDefaultStackForTest(nodeDir)
+	stackConfig := createStackConfigForTest(nodeDir)
+	l2stackA, err := node.New(stackConfig)
 	Require(t, err)
 
 	l2chainDb, err := l2stackA.OpenDatabase("chaindb", 0, 0, "", false)
diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go
index 2e0544cc26..fc7eb4cc2d 100644
--- a/system_tests/forwarder_test.go
+++ b/system_tests/forwarder_test.go
@@ -35,7 +35,7 @@ func TestStaticForwarder(t *testing.T) {
 	ipcPath := tmpPath(t, "test.ipc")
 	ipcConfig := genericconf.IPCConfigDefault
 	ipcConfig.Path = ipcPath
-	stackConfig := stackConfigForTest(t)
+	stackConfig := createStackConfigForTest(t.TempDir())
 	ipcConfig.Apply(stackConfig)
 	nodeConfigA := arbnode.ConfigDefaultL1Test()
 	nodeConfigA.BatchPoster.Enable = false
@@ -99,7 +99,7 @@ func fallbackSequencer(
 	ctx context.Context, t *testing.T, opts *fallbackSequencerOpts,
 ) (l2info info, currentNode *arbnode.Node, l2client *ethclient.Client,
 	l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node) {
-	stackConfig := stackConfigForTest(t)
+	stackConfig := createStackConfigForTest(t.TempDir())
 	ipcConfig := genericconf.IPCConfigDefault
 	ipcConfig.Path = opts.ipcPath
 	ipcConfig.Apply(stackConfig)
@@ -120,7 +120,7 @@ func createForwardingNode(
 	redisUrl string,
 	fallbackPath string,
 ) (*ethclient.Client, *arbnode.Node) {
-	stackConfig := stackConfigForTest(t)
+	stackConfig := createStackConfigForTest(t.TempDir())
 	if ipcPath != "" {
 		ipcConfig := genericconf.IPCConfigDefault
 		ipcConfig.Path = ipcPath
@@ -148,7 +148,7 @@ func createSequencer(
 	ipcPath string,
 	redisUrl string,
 ) (*ethclient.Client, *arbnode.Node) {
-	stackConfig := stackConfigForTest(t)
+	stackConfig := createStackConfigForTest(t.TempDir())
 	ipcConfig := genericconf.IPCConfigDefault
 	ipcConfig.Path = ipcPath
 	ipcConfig.Apply(stackConfig)
diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go
index e25b4a21ea..dc73825a13 100644
--- a/system_tests/ipc_test.go
+++ b/system_tests/ipc_test.go
@@ -18,7 +18,7 @@ func TestIpcRpc(t *testing.T) {
 	ipcPath := tmpPath(t, "test.ipc")
 	ipcConfig := genericconf.IPCConfigDefault
 	ipcConfig.Path = ipcPath
-	stackConf := stackConfigForTest(t)
+	stackConf := createStackConfigForTest(t.TempDir())
 	ipcConfig.Apply(stackConf)
 
 	ctx, cancel := context.WithCancel(context.Background())
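
The new waitForNitro.sh step just retries eth_chainId until the endpoint answers: curl exits 0 as soon as any HTTP response arrives. A minimal Go rendering of the same readiness probe, for illustration only (the endpoint and payload come from the script; the Go translation itself is not part of this change):

```go
// Readiness probe equivalent to .github/workflows/waitForNitro.sh.
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

func main() {
	// the same JSON-RPC request the script sends with curl
	payload := []byte(`{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}`)
	for {
		resp, err := http.Post("http://localhost:8547", "application/json", bytes.NewReader(payload))
		if err == nil {
			// like curl's exit code 0, any HTTP response counts as "up"
			resp.Body.Close()
			fmt.Println("nitro RPC is up")
			return
		}
		time.Sleep(20 * time.Second) // matches the script's retry interval
	}
}
```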
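The arbnode/inbox_tracker.go change registers two gauges on go-ethereum's default metrics registry and updates them after each AddSequencerBatches call. A standalone sketch of that registration pattern, assuming metrics collection has been enabled before the gauge is constructed (nitro does this from its command-line config at startup):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Collection must be on before gauges are created, otherwise
	// NewRegisteredGauge hands back a no-op gauge.
	metrics.Enabled = true

	// A nil registry selects metrics.DefaultRegistry, matching the
	// inboxLatestBatchGauge registration above.
	gauge := metrics.NewRegisteredGauge("arb/inbox/latest/batch", nil)
	gauge.Update(42) // the tracker records the latest batch position like this
	fmt.Println(gauge.Snapshot().Value()) // 42
}
```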
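The `reflect.ValueOf(l1client).IsNil()` guards added in cmd/nitro/init.go and execution/gethexec/node.go exist because a Go interface wrapping a typed nil pointer does not compare equal to nil. The ordering of the two checks matters: the plain nil comparison must run first, since reflect.Value.IsNil panics on the zero Value obtained from a nil interface. A self-contained illustration (the type names here are invented for the example, not taken from the codebase):

```go
package main

import (
	"fmt"
	"reflect"
)

// L1Client stands in for the client interface used above; illustrative only.
type L1Client interface{ ChainID() int }

type rpcClient struct{}

func (c *rpcClient) ChainID() int { return 1337 }

func main() {
	var ptr *rpcClient // typed nil pointer
	var client L1Client = ptr

	// The interface now holds (type=*rpcClient, value=nil), so it is not == nil.
	fmt.Println(client == nil) // false
	// reflect sees through the interface to the nil pointer inside.
	fmt.Println(reflect.ValueOf(client).IsNil()) // true
}
```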
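For orientation, this is the shape of a test after the NodeBuilder migration in common_test.go, assembled from the rewritten tests above. It is a composite sketch, not a test present in this diff, and it assumes the system_tests package context and its helpers:

```go
func TestNodeBuilderSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// true: build an L1 stack plus a sequencing L2 node; false would build
	// an L2-only node with takeOwnership defaulted to true.
	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
	builder.execConfig.Sequencer.MaxTxDataSize = 110000 // adjust configs before Build
	cleanup := builder.Build(t)                         // populates builder.L1 and builder.L2
	defer cleanup()

	// A second, non-sequencer node syncing from the same L1.
	l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{})
	defer cleanup2nd()

	builder.L2Info.GenerateAccount("User2")
	tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
	err := builder.L2.Client.SendTransaction(ctx, tx)
	Require(t, err)
	_, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx)
	Require(t, err)
	// the batch poster (enabled by default in ConfigDefaultL1Test) feeds node B
	_, err = EnsureTxSucceededWithTimeout(ctx, l2B.Client, tx, time.Second*30)
	Require(t, err)
}
```

Compared with the old createTestNodeOnL1WithConfig/Create2ndNode helpers, the builder returns a single cleanup closure per node, so the defer pairs replace the previous requireClose/StopAndWait combinations.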