Skip to content

Commit

Permalink
Merge branch 'master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
joshuacolvin0 authored Oct 13, 2023
2 parents eb3c341 + 5ae8502 commit 206ee1f
Show file tree
Hide file tree
Showing 14 changed files with 318 additions and 113 deletions.
11 changes: 11 additions & 0 deletions .github/workflows/docker.yml
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,17 @@ jobs:
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Start background nitro-testnode
shell: bash
run: |
cd nitro-testnode
./test-node.bash --init --dev &
- name: Wait for rpc to come up
shell: bash
run: |
${{ github.workspace }}/.github/workflows/waitForNitro.sh
- name: Print WAVM module root
id: module-root
run: |
Expand Down
10 changes: 10 additions & 0 deletions .github/workflows/waitForNitro.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Poll the local nitro JSON-RPC endpoint until it answers successfully.
# Used by the docker workflow to wait for the background nitro-testnode
# (started with `./test-node.bash --init --dev &`) before continuing.
#
# Fixes over the original:
#   - shebang added: the workflow invokes this script by path, so it must
#     declare its interpreter.
#   - `--fail` added: without it curl exits 0 on HTTP 4xx/5xx responses,
#     so the loop could stop before the RPC is actually healthy.
#   - `if curl ...; then` replaces the fragile `$?` check.
while true
do
    if curl --fail -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":45678,"method":"eth_chainId","params":[]}' 'http://localhost:8547'; then
        exit 0
    fi
    # RPC not up yet; wait before retrying
    sleep 20
done
9 changes: 9 additions & 0 deletions arbnode/inbox_tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"

"github.com/offchainlabs/nitro/arbos/arbostypes"
"github.com/offchainlabs/nitro/arbstate"
"github.com/offchainlabs/nitro/arbutil"
Expand All @@ -24,6 +26,11 @@ import (
"github.com/offchainlabs/nitro/util/containers"
)

// Inbox metrics, updated when new sequencer batches are recorded
// (see the Update calls in AddSequencerBatches).
var (
// inboxLatestBatchGauge reports the sequence number of the latest sequencer batch processed.
inboxLatestBatchGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch", nil)
// inboxLatestBatchMessageGauge reports the message count as of that latest batch.
inboxLatestBatchMessageGauge = metrics.NewRegisteredGauge("arb/inbox/latest/batch/message", nil)
)

type InboxTracker struct {
db ethdb.Database
txStreamer *TransactionStreamer
Expand Down Expand Up @@ -676,6 +683,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L
"l1Block", latestL1Block,
"l1Timestamp", time.Unix(int64(latestTimestamp), 0),
)
inboxLatestBatchGauge.Update(int64(pos))
inboxLatestBatchMessageGauge.Update(int64(newMessageCount))

if t.validator != nil {
t.validator.ReorgToBatchCount(startPos)
Expand Down
3 changes: 2 additions & 1 deletion cmd/nitro/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"fmt"
"math/big"
"os"
"reflect"
"regexp"
"runtime"
"strings"
Expand Down Expand Up @@ -296,7 +297,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node
return nil, err
}
if initConfig.Prune == "validator" {
if l1Client == nil {
if l1Client == nil || reflect.ValueOf(l1Client).IsNil() {
return nil, errors.New("an L1 connection is required for validator pruning")
}
callOpts := bind.CallOpts{
Expand Down
5 changes: 4 additions & 1 deletion cmd/nitro/nitro.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,10 @@ import (
)

// printSampleUsage prints a short usage banner for the nitro binary,
// listing the --help flag and the --dev shortcut (which starts a default
// L2-only dev chain; see devFlagArgs in cmd/util/confighelpers).
//
// Fix: this span contained both the pre-change line ("Sample usage: %s --help")
// and its replacement — a diff artifact — so it printed two contradictory
// usage lines. Only the intended post-change output is kept.
func printSampleUsage(name string) {
	fmt.Printf("Sample usage: %s [OPTIONS] \n\n", name)
	fmt.Printf("Options:\n")
	fmt.Printf("      --help\n")
	fmt.Printf("      --dev: Start a default L2-only dev chain\n")
}

func addUnlockWallet(accountManager *accounts.Manager, walletConf *genericconf.WalletConfig) (common.Address, error) {
Expand Down
22 changes: 22 additions & 0 deletions cmd/util/confighelpers/configuration.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,10 +138,32 @@ func PrintErrorAndExit(err error, usage func(string)) {
}
}

// devFlagArgs returns the canned argument list substituted for the
// command line when the user passes --dev: a local, L2-only dev chain
// with dev init, no L1 connection, a single sequencer, and an HTTP RPC
// endpoint on 127.0.0.1:8547.
func devFlagArgs() []string {
	return []string{
		"--init.dev-init",
		"--init.dev-init-address", "0x3f1Eae7D46d88F08fc2F8ed27FCb2AB183EB2d0E",
		"--node.dangerous.no-l1-listener",
		"--node.parent-chain-reader.enable=false",
		"--parent-chain.id=1337",
		"--chain.id=412346",
		"--persistent.chain", "/tmp/dev-test",
		"--node.sequencer",
		"--node.dangerous.no-sequencer-coordinator",
		"--node.staker.enable=false",
		"--init.empty=false",
		"--http.port", "8547",
		"--http.addr", "127.0.0.1",
	}
}

func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) {
for _, arg := range args {
if arg == "--version" || arg == "-v" {
return nil, ErrVersion
} else if arg == "--dev" {
args = devFlagArgs()
break
}
}
if err := f.Parse(args); err != nil {
Expand Down
7 changes: 6 additions & 1 deletion execution/gethexec/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"reflect"
"sync/atomic"
"testing"

Expand Down Expand Up @@ -73,6 +74,7 @@ func (c *Config) Validate() error {
func ConfigAddOptions(prefix string, f *flag.FlagSet) {
arbitrum.ConfigAddOptions(prefix+".rpc", f)
SequencerConfigAddOptions(prefix+".sequencer", f)
headerreader.AddOptions(prefix+".parent-chain-reader", f)
arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f)
f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)")
AddOptionsForNodeForwarderConfig(prefix+".forwarder", f)
Expand All @@ -85,6 +87,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) {
var ConfigDefault = Config{
RPC: arbitrum.DefaultConfig,
Sequencer: DefaultSequencerConfig,
ParentChainReader: headerreader.DefaultConfig,
RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig,
ForwardingTarget: "",
TxPreChecker: DefaultTxPreCheckerConfig,
Expand All @@ -96,6 +99,7 @@ var ConfigDefault = Config{

func ConfigDefaultNonSequencerTest() *Config {
config := ConfigDefault
config.ParentChainReader = headerreader.Config{}
config.Sequencer.Enable = false
config.Forwarder = DefaultTestForwarderConfig
config.ForwardingTarget = "null"
Expand All @@ -107,6 +111,7 @@ func ConfigDefaultNonSequencerTest() *Config {

func ConfigDefaultTest() *Config {
config := ConfigDefault
config.ParentChainReader = headerreader.Config{}
config.Sequencer = TestSequencerConfig
config.ForwardingTarget = "null"

Expand Down Expand Up @@ -149,7 +154,7 @@ func CreateExecutionNode(
var sequencer *Sequencer

var parentChainReader *headerreader.HeaderReader
if l1client != nil {
if l1client != nil && !reflect.ValueOf(l1client).IsNil() {
arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion nitro-testnode
114 changes: 56 additions & 58 deletions system_tests/batch_poster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"

"github.com/offchainlabs/nitro/arbnode"
"github.com/offchainlabs/nitro/execution/gethexec"
"github.com/offchainlabs/nitro/util/redisutil"
)

Expand Down Expand Up @@ -46,52 +45,52 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
parallelBatchPosters = 4
}

conf := arbnode.ConfigDefaultL1Test()
conf.BatchPoster.Enable = false
conf.BatchPoster.RedisUrl = redisUrl
l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil)
defer requireClose(t, l1stack)
defer nodeA.StopAndWait()
builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
builder.nodeConfig.BatchPoster.Enable = false
builder.nodeConfig.BatchPoster.RedisUrl = redisUrl
cleanup := builder.Build(t)
defer cleanup()
l1A, l2A := builder.L1, builder.L2

l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil)
defer nodeB.StopAndWait()
l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{})
defer cleanup2nd()

l2info.GenerateAccount("User2")
builder.L2Info.GenerateAccount("User2")

var txs []*types.Transaction

for i := 0; i < 100; i++ {
tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, common.Big1, nil)
tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, common.Big1, nil)
txs = append(txs, tx)

err := l2clientA.SendTransaction(ctx, tx)
err := l2A.Client.SendTransaction(ctx, tx)
Require(t, err)
}

for _, tx := range txs {
_, err := EnsureTxSucceeded(ctx, l2clientA, tx)
_, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
Require(t, err)
}

firstTxData, err := txs[0].MarshalBinary()
Require(t, err)
seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx)
conf.BatchPoster.Enable = true
conf.BatchPoster.MaxSize = len(firstTxData) * 2
startL1Block, err := l1client.BlockNumber(ctx)
seqTxOpts := builder.L1Info.GetDefaultTransactOpts("Sequencer", ctx)
builder.nodeConfig.BatchPoster.Enable = true
builder.nodeConfig.BatchPoster.MaxSize = len(firstTxData) * 2
startL1Block, err := l1A.Client.BlockNumber(ctx)
Require(t, err)
for i := 0; i < parallelBatchPosters; i++ {
// Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race
batchPosterConfig := conf.BatchPoster
batchPosterConfig := builder.nodeConfig.BatchPoster
batchPoster, err := arbnode.NewBatchPoster(ctx,
&arbnode.BatchPosterOpts{
DataPosterDB: nil,
L1Reader: nodeA.L1Reader,
Inbox: nodeA.InboxTracker,
Streamer: nodeA.TxStreamer,
SyncMonitor: nodeA.SyncMonitor,
L1Reader: l2A.ConsensusNode.L1Reader,
Inbox: l2A.ConsensusNode.InboxTracker,
Streamer: l2A.ConsensusNode.TxStreamer,
SyncMonitor: l2A.ConsensusNode.SyncMonitor,
Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig },
DeployInfo: nodeA.DeployInfo,
DeployInfo: l2A.ConsensusNode.DeployInfo,
TransactOpts: &seqTxOpts,
DAWriter: nil,
},
Expand All @@ -103,11 +102,11 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {

lastTxHash := txs[len(txs)-1].Hash()
for i := 90; i > 0; i-- {
SendWaitTestTransactions(t, ctx, l1client, []*types.Transaction{
l1info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
SendWaitTestTransactions(t, ctx, l1A.Client, []*types.Transaction{
builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil),
})
time.Sleep(500 * time.Millisecond)
_, err := l2clientB.TransactionReceipt(ctx, lastTxHash)
_, err := l2B.Client.TransactionReceipt(ctx, lastTxHash)
if err == nil {
break
}
Expand All @@ -122,9 +121,9 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
// However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl.
if false {
// Make sure the batch poster is able to post multiple batches in one block
endL1Block, err := l1client.BlockNumber(ctx)
endL1Block, err := l1A.Client.BlockNumber(ctx)
Require(t, err)
seqInbox, err := arbnode.NewSequencerInbox(l1client, nodeA.DeployInfo.SequencerInbox, 0)
seqInbox, err := arbnode.NewSequencerInbox(l1A.Client, l2A.ConsensusNode.DeployInfo.SequencerInbox, 0)
Require(t, err)
batches, err := seqInbox.LookupBatchesInRange(ctx, new(big.Int).SetUint64(startL1Block), new(big.Int).SetUint64(endL1Block))
Require(t, err)
Expand All @@ -144,7 +143,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
}
}

l2balance, err := l2clientB.BalanceAt(ctx, l2info.GetAddress("User2"), nil)
l2balance, err := l2B.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
Require(t, err)

if l2balance.Sign() == 0 {
Expand All @@ -157,26 +156,26 @@ func TestBatchPosterLargeTx(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

conf := gethexec.ConfigDefaultTest()
conf.Sequencer.MaxTxDataSize = 110000
l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, conf, nil, nil)
defer requireClose(t, l1stack)
defer nodeA.StopAndWait()
builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
builder.execConfig.Sequencer.MaxTxDataSize = 110000
cleanup := builder.Build(t)
defer cleanup()
l2A := builder.L2

l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nil)
defer nodeB.StopAndWait()
l2B, cleanup2nd := builder.Build2ndNode(t, &SecondNodeParams{})
defer cleanup2nd()

data := make([]byte, 100000)
_, err := rand.Read(data)
Require(t, err)
faucetAddr := l2info.GetAddress("Faucet")
gas := l2info.TransferGas + 20000*uint64(len(data))
tx := l2info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data)
err = l2clientA.SendTransaction(ctx, tx)
faucetAddr := builder.L2Info.GetAddress("Faucet")
gas := builder.L2Info.TransferGas + 20000*uint64(len(data))
tx := builder.L2Info.PrepareTxTo("Faucet", &faucetAddr, gas, common.Big0, data)
err = l2A.Client.SendTransaction(ctx, tx)
Require(t, err)
receiptA, err := EnsureTxSucceeded(ctx, l2clientA, tx)
receiptA, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
Require(t, err)
receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2clientB, tx, time.Second*30)
receiptB, err := EnsureTxSucceededWithTimeout(ctx, l2B.Client, tx, time.Second*30)
Require(t, err)
if receiptA.BlockHash != receiptB.BlockHash {
Fatal(t, "receipt A block hash", receiptA.BlockHash, "does not equal receipt B block hash", receiptB.BlockHash)
Expand All @@ -188,38 +187,37 @@ func TestBatchPosterKeepsUp(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

conf := arbnode.ConfigDefaultL1Test()
conf.BatchPoster.CompressionLevel = brotli.BestCompression
conf.BatchPoster.MaxDelay = time.Hour
execConf := gethexec.ConfigDefaultTest()
execConf.RPC.RPCTxFeeCap = 1000.
l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, execConf, nil, nil)
defer requireClose(t, l1stack)
defer nodeA.StopAndWait()
l2info.GasPrice = big.NewInt(100e9)
builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
builder.nodeConfig.BatchPoster.CompressionLevel = brotli.BestCompression
builder.nodeConfig.BatchPoster.MaxDelay = time.Hour
builder.execConfig.RPC.RPCTxFeeCap = 1000.
cleanup := builder.Build(t)
defer cleanup()
l2A := builder.L2
builder.L2Info.GasPrice = big.NewInt(100e9)

go func() {
data := make([]byte, 90000)
_, err := rand.Read(data)
Require(t, err)
for {
gas := l2info.TransferGas + 20000*uint64(len(data))
tx := l2info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data)
err = l2clientA.SendTransaction(ctx, tx)
gas := builder.L2Info.TransferGas + 20000*uint64(len(data))
tx := builder.L2Info.PrepareTx("Faucet", "Faucet", gas, common.Big0, data)
err = l2A.Client.SendTransaction(ctx, tx)
Require(t, err)
_, err := EnsureTxSucceeded(ctx, l2clientA, tx)
_, err := EnsureTxSucceeded(ctx, l2A.Client, tx)
Require(t, err)
}
}()

start := time.Now()
for {
time.Sleep(time.Second)
batches, err := nodeA.InboxTracker.GetBatchCount()
batches, err := l2A.ConsensusNode.InboxTracker.GetBatchCount()
Require(t, err)
postedMessages, err := nodeA.InboxTracker.GetBatchMessageCount(batches - 1)
postedMessages, err := l2A.ConsensusNode.InboxTracker.GetBatchMessageCount(batches - 1)
Require(t, err)
haveMessages, err := nodeA.TxStreamer.GetMessageCount()
haveMessages, err := l2A.ConsensusNode.TxStreamer.GetMessageCount()
Require(t, err)
duration := time.Since(start)
fmt.Printf("batches posted: %v over %v (%.2f batches/second)\n", batches, duration, float64(batches)/(float64(duration)/float64(time.Second)))
Expand Down
Loading

0 comments on commit 206ee1f

Please sign in to comment.