Node builder pattern for running test nodes in tests #1883

Merged: 16 commits, Oct 13, 2023
Changes from 9 commits
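
The change this PR makes, in one place: test setup that used to go through ad-hoc helpers (createTestNodeOnL1WithConfig, CreateTestL2WithConfig, Create2ndNode) now goes through a NodeBuilder. Below is a minimal sketch of the new pattern, distilled from the diffs that follow; names are as they appear at this commit ("Changes from 9 commits"), so later commits in the PR may have refined them.

```go
// Build a primary L2 node (plus an L1 test node when the flag is true).
builder := NewNodeBuilder(ctx).DefaultConfig(t, true) // true: also stand up an L1 test node
builder.nodeConfig.BatchPoster.Enable = false         // tweak config fields before Build
cleanup := builder.Build(t)                           // starts the nodes
defer cleanup()                                       // replaces the old requireClose/StopAndWait pairs
l1A, l2A := builder.L1, builder.L2                    // handles expose .Client and .Node; account info lives on builder.L1Info/L2Info

// Build a second L2 node fed from the first, replacing Create2ndNode.
params := make(SecondNodeParams) // map-style params at this commit; nil entries fall back to defaults
params["nodeConfig"] = nil
params["dasConfig"] = nil
l2B, cleanup2nd := builder.Build2ndNode(t, params)
defer cleanup2nd()
```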
system_tests/batch_poster_test.go (110 changes: 58 additions & 52 deletions)
@@ -45,56 +45,59 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
         parallelBatchPosters = 4
     }
 
-    conf := arbnode.ConfigDefaultL1Test()
-    conf.BatchPoster.Enable = false
-    conf.BatchPoster.RedisUrl = redisUrl
-    l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil)
-    defer requireClose(t, l1stack)
-    defer nodeA.StopAndWait()
+    builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+    builder.nodeConfig.BatchPoster.Enable = false
+    builder.nodeConfig.BatchPoster.RedisUrl = redisUrl
+    cleanup := builder.Build(t)
+    defer cleanup()
+    l1A, l2A := builder.L1, builder.L2
 
-    l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil)
-    defer nodeB.StopAndWait()
+    params := make(SecondNodeParams)
+    params["nodeConfig"] = nil
+    params["dasConfig"] = nil
+    l2B, cleanup2nd := builder.Build2ndNode(t, params)
+    defer cleanup2nd()
 
-    l2info.GenerateAccount("User2")
+    builder.L2Info.GenerateAccount("User2")
 
     var txs []*types.Transaction
 
     for i := 0; i < 100; i++ {
-        tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil)
+        tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
         txs = append(txs, tx)
 
-        err := l2clientA.SendTransaction(ctx, tx)
+        err := l2A.Client.SendTransaction(ctx, tx)
         Require(t, err)
     }
 
     for _, tx := range txs {
-        _, err := EnsureTxSucceeded(ctx, l2clientA, tx)
+        _, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
         Require(t, err)
     }
 
     firstTxData, err := txs[0].MarshalBinary()
     Require(t, err)
-    seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx)
-    conf.BatchPoster.Enable = true
-    conf.BatchPoster.MaxSize = len(firstTxData) * 2
-    startL1Block, err := l1client.BlockNumber(ctx)
+    seqTxOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx)
+    builder.nodeConfig.BatchPoster.Enable = true
+    builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2
+    startL1Block, err := l1A.Client.BlockNumber(ctx)
     Require(t, err)
     for i := 0; i < parallelBatchPosters; i++ {
         // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race
-        batchPosterConfig := conf.BatchPoster
-        batchPoster, err := arbnode.NewBatchPoster(ctx, nil, nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil)
+        batchPosterConfig := builder.nodeConfig.BatchPoster
+        batchPoster, err := arbnode.NewBatchPoster(ctx, nil, l2A.Node.L1Reader, l2A.Node.InboxTracker, l2A.Node.TxStreamer, l2A.Node.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, l2A.Node.DeployInfo, &seqTxOpts, nil)
         Require(t, err)
         batchPoster.Start(ctx)
         defer batchPoster.StopAndWait()
     }
 
     lastTxHash := txs[len(txs)-1].Hash()
     for i := 90; i > 0; i-- {
-        SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{
-            l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
+        SendWaitTestTransactions(t, ctx, l1A.Client, []*types.Transaction{
+            builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
         })
         time.Sleep(500 * time.Millisecond)
-        _, err := l2clientB.TransactionReceipt(ctx, lastTxHash)
+        _, err := l2B.Client.TransactionReceipt(ctx, lastTxHash)
         if err == nil {
             break
         }
@@ -107,9 +110,9 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
     // However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl.
     if false {
         // Make sure the batch poster is able to post multiple batches in one block
-        endL1Block, err := l1client.BlockNumber(ctx)
+        endL1Block, err := l1A.Client.BlockNumber(ctx)
         Require(t, err)
-        seqInbox, err := arbnode.NewSequencerInbox(l1client, nodeA.DeployInfo.SequencerInbox, 0)
+        seqInbox, err := arbnode.NewSequencerInbox(l1A.Client, l2A.Node.DeployInfo.SequencerInbox, 0)
         Require(t, err)
         batches, err := seqInbox.LookupBatchesInRange(ctx, new(big.Int).SetUint64(startL1Block), new(big.Int).SetUint64(endL1Block))
         Require(t, err)
@@ -129,7 +132,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
         }
     }
 
-    l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
+    l2balance, err := l2B.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
     Require(t, err)
 
     if l2balance.Sign() == 0 {
@@ -142,26 +145,29 @@ func TestBatchPosterLargeTx(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    conf := arbnode.ConfigDefaultL1Test()
-    conf.Sequencer.MaxTxDataSize = 110000
-    l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil)
-    defer requireClose(t, l1stack)
-    defer nodeA.StopAndWait()
+    builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+    builder.nodeConfig.Sequencer.MaxTxDataSize = 110000
+    cleanup := builder.Build(t)
+    defer cleanup()
+    l2A := builder.L2
 
-    l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil)
-    defer nodeB.StopAndWait()
+    params := make(SecondNodeParams)
+    params["nodeConfig"] = nil
+    params["dasConfig"] = nil
+    l2B, cleanup2nd := builder.Build2ndNode(t, params)
+    defer cleanup2nd()
 
     data := make([]byte, 100000)
     _, err := rand.Read(data)
     Require(t, err)
-    faucetAddr := l2info.GetAddress("Faucet")
-    gas := l2info.TransferGas + 20000*uint64(len(data))
-    tx := l2info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data)
-    err = l2clientA.SendTransaction(ctx, tx)
+    faucetAddr := builder.L2Info.GetAddress("Faucet")
+    gas := builder.L2Info.TransferGas + 20000*uint64(len(data))
+    tx := builder.L2Info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data)
+    err = l2A.Client.SendTransaction(ctx, tx)
     Require(t, err)
-    receiptA, err := EnsureTxSucceeded(ctx, l2clientA, tx)
+    receiptA, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
     Require(t, err)
-    receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2clientB, tx, time.Second*30)
+    receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2B.Client, tx, time.Second*30)
     Require(t, err)
     if receiptA.BlockHash != receiptB.BlockHash {
         Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash)
@@ -173,37 +179,37 @@ func TestBatchPosterKeepsUp(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    conf := arbnode.ConfigDefaultL1Test()
-    conf.BatchPoster.CompressionLevel = brotli.BestCompression
-    conf.BatchPoster.MaxDelay = time.Hour
-    conf.RPC.RPCTxFeeCap = 1000.
-    l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil)
-    defer requireClose(t, l1stack)
-    defer nodeA.StopAndWait()
-    l2info.GasPrice = big.NewInt(100e9)
+    builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+    builder.nodeConfig.BatchPoster.CompressionLevel = brotli.BestCompression
+    builder.nodeConfig.BatchPoster.MaxDelay = time.Hour
+    builder.nodeConfig.RPC.RPCTxFeeCap = 1000.
+    cleanup := builder.Build(t)
+    defer cleanup()
+    l2A := builder.L2
+    builder.L2Info.GasPrice = big.NewInt(100e9)
 
     go func() {
         data := make([]byte, 90000)
         _, err := rand.Read(data)
         Require(t, err)
         for {
-            gas := l2info.TransferGas + 20000*uint64(len(data))
-            tx := l2info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data)
-            err = l2clientA.SendTransaction(ctx, tx)
+            gas := builder.L2Info.TransferGas + 20000*uint64(len(data))
+            tx := builder.L2Info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data)
+            err = l2A.Client.SendTransaction(ctx, tx)
             Require(t, err)
-            _, err := EnsureTxSucceeded(ctx, l2clientA, tx)
+            _, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
             Require(t, err)
         }
     }()
 
     start := time.Now()
     for {
         time.Sleep(time.Second)
-        batches, err := nodeA.InboxTracker.GetBatchCount()
+        batches, err := l2A.Node.InboxTracker.GetBatchCount()
         Require(t, err)
-        postedMessages, err := nodeA.InboxTracker.GetBatchMessageCount(batches - 1)
+        postedMessages, err := l2A.Node.InboxTracker.GetBatchMessageCount(batches - 1)
         Require(t, err)
-        haveMessages, err := nodeA.TxStreamer.GetMessageCount()
+        haveMessages, err := l2A.Node.TxStreamer.GetMessageCount()
         Require(t, err)
         duration := time.Since(start)
         fmt.Printf("batches posted: %v over %v (%.2f batches/second)\n", batches, duration, float64(batches)/(float64(duration)/float64(time.Second)))
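
One detail from testBatchPosterParallel worth calling out: the loop that starts the parallel posters copies the batch-poster config into a loop-local variable before handing NewBatchPoster a fetcher closure. Per the comment in the diff, NewBatchPoster calls Validate() on the config it receives, so four posters sharing one struct would race. A sketch of just that idiom, with names taken from the hunk above:

```go
for i := 0; i < parallelBatchPosters; i++ {
	// Value copy: each poster validates and reads its own struct, not a shared one.
	batchPosterConfig := builder.nodeConfig.BatchPoster
	configFetcher := func() *arbnode.BatchPosterConfig { return &batchPosterConfig }
	// configFetcher is then passed to arbnode.NewBatchPoster, as shown in the diff.
	_ = configFetcher
}
```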
system_tests/bloom_test.go (31 changes: 16 additions & 15 deletions)
@@ -17,25 +17,26 @@ import (
     "github.com/ethereum/go-ethereum"
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
-    "github.com/offchainlabs/nitro/arbnode"
     "github.com/offchainlabs/nitro/solgen/go/mocksgen"
 )
 
 func TestBloom(t *testing.T) {
     t.Parallel()
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    nodeconfig := arbnode.ConfigDefaultL2Test()
-    nodeconfig.RPC.BloomBitsBlocks = 256
-    nodeconfig.RPC.BloomConfirms = 1
-    l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nodeconfig, false)
-    defer node.StopAndWait()
+    builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+    builder.nodeConfig.RPC.BloomBitsBlocks = 256
+    builder.nodeConfig.RPC.BloomConfirms = 1
+    builder.takeOwnership = false
+    cleanup := builder.Build(t)
 
-    l2info.GenerateAccount("User2")
+    defer cleanup()
 
-    ownerTxOpts := l2info.GetDefaultTransactOpts("Owner", ctx)
+    builder.L2Info.GenerateAccount("User2")
+
+    ownerTxOpts := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
     ownerTxOpts.Context = ctx
-    _, simple := deploySimple(t, ctx, ownerTxOpts, client)
+    _, simple := deploySimple(t, ctx, ownerTxOpts, builder.L2.Client)
     simpleABI, err := mocksgen.SimpleMetaData.GetAbi()
     Require(t, err)
 
@@ -63,7 +64,7 @@ func TestBloom(t *testing.T) {
         if sendNullEvent {
             tx, err = simple.EmitNullEvent(&ownerTxOpts)
             Require(t, err)
-            _, err = EnsureTxSucceeded(ctx, client, tx)
+            _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx)
             Require(t, err)
         }
 
@@ -74,15 +75,15 @@ func TestBloom(t *testing.T) {
             tx, err = simple.Increment(&ownerTxOpts)
         }
         Require(t, err)
-        _, err = EnsureTxSucceeded(ctx, client, tx)
+        _, err = EnsureTxSucceeded(ctx, builder.L2.Client, tx)
         Require(t, err)
         if i%100 == 0 {
             t.Log("counts: ", i, "/", countsNum)
         }
     }
 
     for {
-        sectionSize, sectionNum := node.Execution.Backend.APIBackend().BloomStatus()
+        sectionSize, sectionNum := builder.L2.Node.Execution.Backend.APIBackend().BloomStatus()
         if sectionSize != 256 {
             Fatal(t, "unexpected section size: ", sectionSize)
         }
@@ -92,22 +93,22 @@ func TestBloom(t *testing.T) {
         }
         <-time.After(time.Second)
     }
-    lastHeader, err := client.HeaderByNumber(ctx, nil)
+    lastHeader, err := builder.L2.Client.HeaderByNumber(ctx, nil)
     Require(t, err)
     nullEventQuery := ethereum.FilterQuery{
         FromBlock: big.NewInt(0),
         ToBlock:   lastHeader.Number,
         Topics:    [][]common.Hash{{simpleABI.Events["NullEvent"].ID}},
     }
-    logs, err := client.FilterLogs(ctx, nullEventQuery)
+    logs, err := builder.L2.Client.FilterLogs(ctx, nullEventQuery)
     Require(t, err)
     if len(logs) != len(nullEventCounts) {
         Fatal(t, "expected ", len(nullEventCounts), " logs, got ", len(logs))
     }
     incrementEventQuery := ethereum.FilterQuery{
         Topics: [][]common.Hash{{simpleABI.Events["CounterEvent"].ID}},
     }
-    logs, err = client.FilterLogs(ctx, incrementEventQuery)
+    logs, err = builder.L2.Client.FilterLogs(ctx, incrementEventQuery)
     Require(t, err)
     if len(logs) != len(eventCounts) {
         Fatal(t, "expected ", len(eventCounts), " logs, got ", len(logs))
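
The bloom test exercises two smaller builder knobs: DefaultConfig(t, false) builds an L2-only node with no L1 stack (matching the old CreateTestL2WithConfig call it replaces), and builder.takeOwnership = false appears to carry over the old helper's final takeOwnership argument. A sketch under those assumptions:

```go
builder := NewNodeBuilder(ctx).DefaultConfig(t, false) // false: L2-only, no L1 test node
builder.nodeConfig.RPC.BloomBitsBlocks = 256           // small bloom sections so indexing completes during the test
builder.nodeConfig.RPC.BloomConfirms = 1
builder.takeOwnership = false // assumption: same semantics as the old takeOwnership parameter
cleanup := builder.Build(t)
defer cleanup()
```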