diff --git a/.dockerignore b/.dockerignore index 05cec369d9..763aeda1be 100644 --- a/.dockerignore +++ b/.dockerignore @@ -13,6 +13,9 @@ solgen/go **/node_modules target/**/* +!target/machines +!target/machines/* +!target/machines/**/* brotli/buildfiles/**/* # these are used by environment outside the docker: diff --git a/arbnode/api.go b/arbnode/api.go index d28d7481d9..51437864d1 100644 --- a/arbnode/api.go +++ b/arbnode/api.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" @@ -23,8 +22,7 @@ func (a *BlockValidatorAPI) LatestValidated(ctx context.Context) (*staker.Global } type BlockValidatorDebugAPI struct { - val *staker.StatelessBlockValidator - blockchain *core.BlockChain + val *staker.StatelessBlockValidator } type ValidateBlockResult struct { diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index c848099513..77a839b70a 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/redislock" @@ -47,6 +48,7 @@ import ( var ( batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" ) type batchPosterPosition struct { @@ -66,6 +68,8 @@ type BatchPoster struct { syncMonitor *SyncMonitor seqInboxABI *abi.ABI seqInboxAddr common.Address + bridgeAddr common.Address + gasRefunderAddr common.Address building *buildingBatch daWriter das.DataAvailabilityServiceWriter dataPoster *dataposter.DataPoster @@ -78,6 +82,8 @@ type BatchPoster struct { batchReverted atomic.Bool // indicates whether data poster batch was reverted nextRevertCheckBlock int64 // the last parent block scanned for reverting batches + + accessList func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList } type l1BlockBound int @@ -162,7 +168,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") redislock.AddConfigOptions(prefix+".redis-lock", f) - dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) + dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfig) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) } @@ -183,6 +189,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", L1BlockBoundBypass: time.Hour, + RedisLock: redislock.DefaultCfg, } var DefaultBatchPosterL1WalletConfig = 
genericconf.WalletConfig{ @@ -210,67 +217,168 @@ var TestBatchPosterConfig = BatchPosterConfig{ L1BlockBoundBypass: time.Hour, } -func NewBatchPoster(ctx context.Context, dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) { - seqInbox, err := bridgegen.NewSequencerInbox(deployInfo.SequencerInbox, l1Reader.Client()) +type BatchPosterOpts struct { + DataPosterDB ethdb.Database + L1Reader *headerreader.HeaderReader + Inbox *InboxTracker + Streamer *TransactionStreamer + SyncMonitor *SyncMonitor + Config BatchPosterConfigFetcher + DeployInfo *chaininfo.RollupAddresses + TransactOpts *bind.TransactOpts + DAWriter das.DataAvailabilityServiceWriter +} + +func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, error) { + seqInbox, err := bridgegen.NewSequencerInbox(opts.DeployInfo.SequencerInbox, opts.L1Reader.Client()) if err != nil { return nil, err } - bridge, err := bridgegen.NewBridge(deployInfo.Bridge, l1Reader.Client()) + bridge, err := bridgegen.NewBridge(opts.DeployInfo.Bridge, opts.L1Reader.Client()) if err != nil { return nil, err } - if err = config().Validate(); err != nil { + if err = opts.Config().Validate(); err != nil { return nil, err } seqInboxABI, err := bridgegen.SequencerInboxMetaData.GetAbi() if err != nil { return nil, err } - redisClient, err := redisutil.RedisClientFromURL(config().RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(opts.Config().RedisUrl) if err != nil { return nil, err } redisLockConfigFetcher := func() *redislock.SimpleCfg { - return &config().RedisLock + simpleRedisLockConfig := opts.Config().RedisLock + simpleRedisLockConfig.Key = batchPosterSimpleRedisLockKey + return &simpleRedisLockConfig } - redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return syncMonitor.Synced() }) + redisLock, err := redislock.NewSimple(redisClient, redisLockConfigFetcher, func() bool { return opts.SyncMonitor.Synced() }) if err != nil { return nil, err } b := &BatchPoster{ - l1Reader: l1Reader, - inbox: inbox, - streamer: streamer, - syncMonitor: syncMonitor, - config: config, - bridge: bridge, - seqInbox: seqInbox, - seqInboxABI: seqInboxABI, - seqInboxAddr: deployInfo.SequencerInbox, - daWriter: daWriter, - redisLock: redisLock, + l1Reader: opts.L1Reader, + inbox: opts.Inbox, + streamer: opts.Streamer, + syncMonitor: opts.SyncMonitor, + config: opts.Config, + bridge: bridge, + seqInbox: seqInbox, + seqInboxABI: seqInboxABI, + seqInboxAddr: opts.DeployInfo.SequencerInbox, + gasRefunderAddr: opts.Config().gasRefunder, + bridgeAddr: opts.DeployInfo.Bridge, + daWriter: opts.DAWriter, + redisLock: redisLock, + accessList: func(SequencerInboxAccs, AfterDelayedMessagesRead int) types.AccessList { + return AccessList(&AccessListOpts{ + SequencerInboxAddr: opts.DeployInfo.SequencerInbox, + DataPosterAddr: opts.TransactOpts.From, + BridgeAddr: opts.DeployInfo.Bridge, + GasRefunderAddr: opts.Config().gasRefunder, + SequencerInboxAccs: SequencerInboxAccs, + AfterDelayedMessagesRead: AfterDelayedMessagesRead, + }) + }, } dataPosterConfigFetcher := func() *dataposter.DataPosterConfig { - return &config().DataPoster + return &(opts.Config().DataPoster) } b.dataPoster, err = dataposter.NewDataPoster(ctx, &dataposter.DataPosterOpts{ - Database: dataPosterDB, 
- HeaderReader: l1Reader, - Auth: transactOpts, + Database: opts.DataPosterDB, + HeaderReader: opts.L1Reader, + Auth: opts.TransactOpts, RedisClient: redisClient, RedisLock: redisLock, Config: dataPosterConfigFetcher, MetadataRetriever: b.getBatchPosterPosition, RedisKey: "data-poster.queue", - }, - ) + }) if err != nil { return nil, err } return b, nil } +type AccessListOpts struct { + SequencerInboxAddr common.Address + BridgeAddr common.Address + DataPosterAddr common.Address + GasRefunderAddr common.Address + SequencerInboxAccs int + AfterDelayedMessagesRead int +} + +// AccessList returns the access list (contracts, storage slots) for the batch poster. +func AccessList(opts *AccessListOpts) types.AccessList { + l := types.AccessList{ + types.AccessTuple{ + Address: opts.SequencerInboxAddr, + StorageKeys: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), // totalDelayedMessagesRead + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), // bridge + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // maxTimeVariation.delayBlocks + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // maxTimeVariation.futureBlocks + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // maxTimeVariation.delaySeconds + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // maxTimeVariation.futureSeconds + // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of + // "eip1967.proxy.admin" subtracted by 1. + common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), + // IMPLEMENTATION_SLOT from OpenZeppelin, keccak-256 hash + // of "eip1967.proxy.implementation" subtracted by 1. + common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"), + // isBatchPoster[batchPosterAddr]; for mainnet it's: "0xa10aa54071443520884ed767b0684edf43acec528b7da83ab38ce60126562660". + common.Hash(arbutil.PaddedKeccak256(opts.DataPosterAddr.Bytes(), []byte{3})), + }, + }, + types.AccessTuple{ + Address: opts.BridgeAddr, + StorageKeys: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // delayedInboxAccs.length + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // sequencerInboxAccs.length + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000009"), // sequencerInbox + common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), // sequencerReportedSubMessageCount + // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of + // "eip1967.proxy.admin" subtracted by 1. + common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), + // IMPLEMENTATION_SLOT from OpenZeppelin, keccak-256 hash + // of "eip1967.proxy.implementation" subtracted by 1.
+ common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc"), + // These below may change when transaction is actually executed: + // - delayedInboxAccs[delayedInboxAccs.length - 1] + // - delayedInboxAccs.push(...); + }, + }, + } + + for _, v := range []struct{ slotIdx, val int }{ + {7, opts.SequencerInboxAccs - 1}, // - sequencerInboxAccs[sequencerInboxAccs.length - 1]; (keccak256(7, sequencerInboxAccs.length - 1)) + {7, opts.SequencerInboxAccs}, // - sequencerInboxAccs.push(...); (keccak256(7, sequencerInboxAccs.length)) + {6, opts.AfterDelayedMessagesRead - 1}, // - delayedInboxAccs[afterDelayedMessagesRead - 1]; (keccak256(6, afterDelayedMessagesRead - 1)) + } { + sb := arbutil.SumBytes(arbutil.PaddedKeccak256([]byte{byte(v.slotIdx)}), big.NewInt(int64(v.val)).Bytes()) + l[1].StorageKeys = append(l[1].StorageKeys, common.Hash(sb)) + } + + if (opts.GasRefunderAddr != common.Address{}) { + l = append(l, types.AccessTuple{ + Address: opts.GasRefunderAddr, + StorageKeys: []common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // CommonParameters.{maxRefundeeBalance, extraGasMargin, calldataCost, maxGasTip} + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // CommonParameters.{maxGasCost, maxSingleGasUsage} + // allowedContracts[msg.sender]; for mainnet it's: "0x7686888b19bb7b75e46bb1aa328b65150743f4899443d722f0adf8e252ccda41". + common.Hash(arbutil.PaddedKeccak256(opts.SequencerInboxAddr.Bytes(), []byte{1})), + // allowedRefundees[refundee]; for mainnet it's: "0xe85fd79f89ff278fc57d40aecb7947873df9f0beac531c8f71a98f630e1eab62". + common.Hash(arbutil.PaddedKeccak256(opts.DataPosterAddr.Bytes(), []byte{2})), + }, + }) + } + return l +} + // checkRevert checks blocks with number in range [from, to] whether they // contain reverted batch_poster transaction. 
// It returns true if it finds batch posting needs to halt, which is true if a batch reverts @@ -909,7 +1017,18 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - tx, err := b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit, new(big.Int)) + tx, err := b.dataPoster.PostTransaction(ctx, + firstMsgTime, + nonce, + newMeta, + b.seqInboxAddr, + data, + gasLimit, + new(big.Int), + b.accessList( + int(batchPosition.NextSeqNum), + int(b.building.segments.delayedMsg)), + ) if err != nil { return false, err } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 3b563e9658..912c6c9afc 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -340,7 +340,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return newFeeCap, newTipCap, nil } -func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { +func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, accessList types.AccessList) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() @@ -362,13 +362,14 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim return nil, err } inner := types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: tipCap, - GasFeeCap: feeCap, - Gas: gasLimit, - To: &to, - Value: value, - Data: calldata, + Nonce: nonce, + GasTipCap: tipCap, + GasFeeCap: feeCap, + Gas: gasLimit, + To: &to, + Value: value, + Data: calldata, + AccessList: accessList, } fullTx, err := p.signer(p.sender, types.NewTx(&inner)) if err != nil { @@ -661,21 +662,21 @@ type DangerousConfig struct { // that flags can be reloaded dynamically. 
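For reference, the new accessList argument that PostTransaction threads through ends up in the AccessList field of go-ethereum's types.DynamicFeeTx, so the posted batch remains an ordinary EIP-1559 transaction that additionally pre-declares the addresses and storage slots it will touch (EIP-2930 semantics, which warms those accesses up front and makes them cheaper at execution time). A minimal sketch of such a transaction, using a throwaway key, placeholder fee values, and a hypothetical storage slot:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey() // throwaway key, for the sketch only
	to := common.HexToAddress("0x1c479675ad559DC151F6Ec7ed3FbF8ceE79582B6")

	// Pre-declare the contract and the slots the call will touch.
	accessList := types.AccessList{{
		Address:     to,
		StorageKeys: []common.Hash{common.HexToHash("0x00")}, // hypothetical slot
	}}

	inner := &types.DynamicFeeTx{
		ChainID:    big.NewInt(1),
		Nonce:      0,
		GasTipCap:  big.NewInt(1e9),  // placeholder tip cap
		GasFeeCap:  big.NewInt(30e9), // placeholder fee cap
		Gas:        100_000,
		To:         &to,
		Value:      new(big.Int),
		AccessList: accessList, // the field PostTransaction now populates
	}
	tx, err := types.SignNewTx(key, types.LatestSignerForChainID(inner.ChainID), inner)
	if err != nil {
		panic(err)
	}
	fmt.Println(tx.Hash())
}
```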
type ConfigFetcher func() *DataPosterConfig -func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.String(prefix+".replacement-times", DefaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") - f.Bool(prefix+".wait-for-l1-finality", DefaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") - f.Uint64(prefix+".max-mempool-transactions", DefaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") - f.Int(prefix+".max-queued-transactions", DefaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") - f.Float64(prefix+".target-price-gwei", DefaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") - f.Float64(prefix+".urgency-gwei", DefaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") - f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") - f.Float64(prefix+".min-tip-cap-gwei", DefaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") - f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") - f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") - f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") - f.Bool(prefix+".use-db-storage", DefaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") - f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") - f.Bool(prefix+".legacy-storage-encoding", DefaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") +func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPosterConfig DataPosterConfig) { + f.String(prefix+".replacement-times", defaultDataPosterConfig.ReplacementTimes, "comma-separated list of durations since first posting to attempt a replace-by-fee") + f.Bool(prefix+".wait-for-l1-finality", defaultDataPosterConfig.WaitForL1Finality, "only treat a transaction as confirmed after L1 finality has been achieved (recommended)") + f.Uint64(prefix+".max-mempool-transactions", defaultDataPosterConfig.MaxMempoolTransactions, "the maximum number of transactions to have queued in the mempool at once (0 = unlimited)") + f.Int(prefix+".max-queued-transactions", defaultDataPosterConfig.MaxQueuedTransactions, "the maximum number of unconfirmed transactions to track at once (0 = unlimited)") + f.Float64(prefix+".target-price-gwei", defaultDataPosterConfig.TargetPriceGwei, "the target price to use for maximum fee cap calculation") + f.Float64(prefix+".urgency-gwei", defaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") + f.Float64(prefix+".min-fee-cap-gwei", defaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") + f.Float64(prefix+".min-tip-cap-gwei", defaultDataPosterConfig.MinTipCapGwei, "the 
minimum tip cap to post transactions at") + f.Float64(prefix+".max-tip-cap-gwei", defaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Uint64(prefix+".nonce-rbf-soft-confs", defaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") + f.Bool(prefix+".allocate-mempool-balance", defaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") + f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") + f.Bool(prefix+".use-noop-storage", defaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".legacy-storage-encoding", defaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) addDangerousOptions(prefix+".dangerous", f) diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index aa6d43785e..8162a1cfbe 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -15,8 +15,8 @@ import ( "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/stopwaiter" ) @@ -26,7 +26,7 @@ type DelayedSequencer struct { l1Reader *headerreader.HeaderReader bridge *DelayedBridge inbox *InboxTracker - exec *execution.ExecutionEngine + exec execution.ExecutionSequencer coordinator *SeqCoordinator waitingForFinalizedBlock uint64 mutex sync.Mutex @@ -63,7 +63,7 @@ var TestDelayedSequencerConfig = DelayedSequencerConfig{ UseMergeFinality: true, } -func NewDelayedSequencer(l1Reader *headerreader.HeaderReader, reader *InboxReader, exec *execution.ExecutionEngine, coordinator *SeqCoordinator, config DelayedSequencerConfigFetcher) (*DelayedSequencer, error) { +func NewDelayedSequencer(l1Reader *headerreader.HeaderReader, reader *InboxReader, exec execution.ExecutionSequencer, coordinator *SeqCoordinator, config DelayedSequencerConfigFetcher) (*DelayedSequencer, error) { d := &DelayedSequencer{ l1Reader: l1Reader, bridge: reader.DelayedBridge(), diff --git a/arbnode/execution/node.go b/arbnode/execution/node.go deleted file mode 100644 index 3a1dceddfb..0000000000 --- a/arbnode/execution/node.go +++ /dev/null @@ -1,90 +0,0 @@ -package execution - -import ( - "errors" - - "github.com/ethereum/go-ethereum/arbitrum" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/eth/filters" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/node" - "github.com/offchainlabs/nitro/util/headerreader" -) - -type ExecutionNode struct { - ChainDB ethdb.Database - Backend *arbitrum.Backend - FilterSystem *filters.FilterSystem - ArbInterface *ArbInterface - ExecEngine *ExecutionEngine - Recorder *BlockRecorder - Sequencer *Sequencer // either nil or same as TxPublisher - TxPublisher TransactionPublisher -} - -func CreateExecutionNode( - stack *node.Node, - chainDB ethdb.Database, - l2BlockChain *core.BlockChain, - l1Reader *headerreader.HeaderReader, - syncMonitor arbitrum.SyncProgressBackend, - fwTarget string, - fwConfig *ForwarderConfig, - rpcConfig arbitrum.Config, - 
recordingDbConfig *arbitrum.RecordingDatabaseConfig, - seqConfigFetcher SequencerConfigFetcher, - precheckConfigFetcher TxPreCheckerConfigFetcher, -) (*ExecutionNode, error) { - execEngine, err := NewExecutionEngine(l2BlockChain) - if err != nil { - return nil, err - } - recorder := NewBlockRecorder(recordingDbConfig, execEngine, chainDB) - var txPublisher TransactionPublisher - var sequencer *Sequencer - seqConfig := seqConfigFetcher() - if seqConfig.Enable { - if fwTarget != "" { - return nil, errors.New("sequencer and forwarding target both set") - } - sequencer, err = NewSequencer(execEngine, l1Reader, seqConfigFetcher) - if err != nil { - return nil, err - } - txPublisher = sequencer - } else { - if fwConfig.RedisUrl != "" { - txPublisher = NewRedisTxForwarder(fwTarget, fwConfig) - } else if fwTarget == "" { - txPublisher = NewTxDropper() - } else { - txPublisher = NewForwarder(fwTarget, fwConfig) - } - } - - txPublisher = NewTxPreChecker(txPublisher, l2BlockChain, precheckConfigFetcher) - arbInterface, err := NewArbInterface(execEngine, txPublisher) - if err != nil { - return nil, err - } - filterConfig := filters.Config{ - LogCacheSize: rpcConfig.FilterLogCacheSize, - Timeout: rpcConfig.FilterTimeout, - } - backend, filterSystem, err := arbitrum.NewBackend(stack, &rpcConfig, chainDB, arbInterface, syncMonitor, filterConfig) - if err != nil { - return nil, err - } - - return &ExecutionNode{ - ChainDB: chainDB, - Backend: backend, - FilterSystem: filterSystem, - ArbInterface: arbInterface, - ExecEngine: execEngine, - Recorder: recorder, - Sequencer: sequencer, - TxPublisher: txPublisher, - }, nil - -} diff --git a/arbnode/inbox_test.go b/arbnode/inbox_test.go index 21eef7499c..3060ae2ae6 100644 --- a/arbnode/inbox_test.go +++ b/arbnode/inbox_test.go @@ -11,10 +11,10 @@ import ( "testing" "time" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/arbmath" @@ -30,7 +30,16 @@ import ( "github.com/offchainlabs/nitro/arbos" ) -func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (*execution.ExecutionEngine, *TransactionStreamer, ethdb.Database, *core.BlockChain) { +type execClientWrapper struct { + *gethexec.ExecutionEngine + t *testing.T +} + +func (w *execClientWrapper) Pause() { w.t.Error("not supported") } +func (w *execClientWrapper) Activate() { w.t.Error("not supported") } +func (w *execClientWrapper) ForwardTo(url string) error { w.t.Error("not supported"); return nil } + +func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (*gethexec.ExecutionEngine, *TransactionStreamer, ethdb.Database, *core.BlockChain) { chainConfig := params.ArbitrumDevTestChainConfig() initData := statetransfer.ArbosInitializationInfo{ @@ -46,18 +55,19 @@ func NewTransactionStreamerForTest(t *testing.T, ownerAddress common.Address) (* arbDb := rawdb.NewMemoryDatabase() initReader := statetransfer.NewMemoryInitDataReader(&initData) - bc, err := execution.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, arbostypes.TestInitMessage, ConfigDefaultL2Test().TxLookupLimit, 0) + bc, err := gethexec.WriteOrTestBlockChain(chainDb, nil, initReader, chainConfig, arbostypes.TestInitMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0) if err != nil { Fail(t, err) } 
transactionStreamerConfigFetcher := func() *TransactionStreamerConfig { return &DefaultTransactionStreamerConfig } - execEngine, err := execution.NewExecutionEngine(bc) + execEngine, err := gethexec.NewExecutionEngine(bc) if err != nil { Fail(t, err) } - inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execEngine, nil, make(chan error, 1), transactionStreamerConfigFetcher) + execSeq := &execClientWrapper{execEngine, t} + inbox, err := NewTransactionStreamer(arbDb, bc.Config(), execSeq, nil, make(chan error, 1), transactionStreamerConfigFetcher) if err != nil { Fail(t, err) } diff --git a/arbnode/maintenance.go b/arbnode/maintenance.go index 2b1837a25b..53d038a0f9 100644 --- a/arbnode/maintenance.go +++ b/arbnode/maintenance.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode/redislock" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" ) @@ -21,6 +22,7 @@ import ( type MaintenanceRunner struct { stopwaiter.StopWaiter + exec execution.FullExecutionClient config MaintenanceConfigFetcher seqCoordinator *SeqCoordinator dbs []ethdb.Database @@ -76,18 +78,20 @@ func MaintenanceConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultMaintenanceConfig = MaintenanceConfig{ TimeOfDay: "", + Lock: redislock.DefaultCfg, minutesAfterMidnight: 0, } type MaintenanceConfigFetcher func() *MaintenanceConfig -func NewMaintenanceRunner(config MaintenanceConfigFetcher, seqCoordinator *SeqCoordinator, dbs []ethdb.Database) (*MaintenanceRunner, error) { +func NewMaintenanceRunner(config MaintenanceConfigFetcher, seqCoordinator *SeqCoordinator, dbs []ethdb.Database, exec execution.FullExecutionClient) (*MaintenanceRunner, error) { cfg := config() if err := cfg.Validate(); err != nil { return nil, fmt.Errorf("validating config: %w", err) } res := &MaintenanceRunner{ + exec: exec, config: config, seqCoordinator: seqCoordinator, dbs: dbs, @@ -167,15 +171,22 @@ func (mr *MaintenanceRunner) maybeRunMaintenance(ctx context.Context) time.Durat func (mr *MaintenanceRunner) runMaintenance() { log.Info("Compacting databases (this may take a while...)") results := make(chan error, len(mr.dbs)) + expected := 0 for _, db := range mr.dbs { + expected++ db := db go func() { results <- db.Compact(nil, nil) }() } - for range mr.dbs { - if err := <-results; err != nil { - log.Warn("Failed to compact database", "err", err) + expected++ + go func() { + results <- mr.exec.Maintenance() + }() + for i := 0; i < expected; i++ { + err := <-results + if err != nil { + log.Warn("maintenance error", "err", err) } } log.Info("Done compacting databases") diff --git a/arbnode/node.go b/arbnode/node.go index 9d43112364..ea09aab253 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -15,13 +15,10 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -29,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" 
- "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbutil" @@ -38,6 +34,8 @@ import ( "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" @@ -343,42 +341,31 @@ func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReade } type Config struct { - RPC arbitrum.Config `koanf:"rpc"` - Sequencer execution.SequencerConfig `koanf:"sequencer" reload:"hot"` - ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` - InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` - DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` - BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` - MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` - ForwardingTarget string `koanf:"forwarding-target"` - Forwarder execution.ForwarderConfig `koanf:"forwarder"` - TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` - BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` - RecordingDatabase arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` - Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` - Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` - SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` - DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` - Dangerous DangerousConfig `koanf:"dangerous"` - Caching execution.CachingConfig `koanf:"caching"` - Archive bool `koanf:"archive"` - TxLookupLimit uint64 `koanf:"tx-lookup-limit"` - TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` - Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` - ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + Sequencer bool `koanf:"sequencer"` + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` + DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` + BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` + MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` + BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` + Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` + Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` + SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` + Dangerous DangerousConfig `koanf:"dangerous"` + TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` + Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` + ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` } func (c *Config) Validate() error { - if c.ParentChainReader.Enable && c.Sequencer.Enable && !c.DelayedSequencer.Enable { + if 
c.ParentChainReader.Enable && c.Sequencer && !c.DelayedSequencer.Enable { log.Warn("delayed sequencer is not enabled, despite sequencer and l1 reader being enabled") } - if c.DelayedSequencer.Enable && !c.Sequencer.Enable { + if c.DelayedSequencer.Enable && !c.Sequencer { return errors.New("cannot enable delayed sequencer without enabling sequencer") } - if err := c.Sequencer.Validate(); err != nil { - return err - } if err := c.BlockValidator.Validate(); err != nil { return err } @@ -400,14 +387,6 @@ func (c *Config) Validate() error { return nil } -func (c *Config) ForwardingTargetF() string { - if c.ForwardingTarget == "null" { - return "" - } - - return c.ForwardingTarget -} - func (c *Config) ValidatorRequired() bool { if c.BlockValidator.Enable { return true @@ -419,65 +398,49 @@ func (c *Config) ValidatorRequired() bool { } func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feedOutputEnable bool) { - arbitrum.ConfigAddOptions(prefix+".rpc", f) - execution.SequencerConfigAddOptions(prefix+".sequencer", f) + f.Bool(prefix+".sequencer", ConfigDefault.Sequencer, "enable sequencer") headerreader.AddOptions(prefix+".parent-chain-reader", f) InboxReaderConfigAddOptions(prefix+".inbox-reader", f) DelayedSequencerConfigAddOptions(prefix+".delayed-sequencer", f) BatchPosterConfigAddOptions(prefix+".batch-poster", f) MessagePrunerConfigAddOptions(prefix+".message-pruner", f) - f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (if not sequencer)") - execution.AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) - execution.TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) staker.BlockValidatorConfigAddOptions(prefix+".block-validator", f) - arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f) broadcastclient.FeedConfigAddOptions(prefix+".feed", f, feedInputEnable, feedOutputEnable) staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) - execution.CachingConfigAddOptions(prefix+".caching", f) - f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) MaintenanceConfigAddOptions(prefix+".maintenance", f) - resourcemanager.ConfigAddOptions(prefix+".resource-mgmt", f) - - archiveMsg := fmt.Sprintf("retain past block state (deprecated, please use %v.caching.archive)", prefix) - f.Bool(prefix+".archive", ConfigDefault.Archive, archiveMsg) } var ConfigDefault = Config{ - RPC: arbitrum.DefaultConfig, - Sequencer: execution.DefaultSequencerConfig, + Sequencer: false, ParentChainReader: headerreader.DefaultConfig, InboxReader: DefaultInboxReaderConfig, DelayedSequencer: DefaultDelayedSequencerConfig, BatchPoster: DefaultBatchPosterConfig, MessagePruner: DefaultMessagePrunerConfig, - ForwardingTarget: "", - TxPreChecker: execution.DefaultTxPreCheckerConfig, BlockValidator: staker.DefaultBlockValidatorConfig, - RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig, Feed: broadcastclient.FeedConfigDefault, Staker: staker.DefaultL1ValidatorConfig, SeqCoordinator: DefaultSeqCoordinatorConfig, DataAvailability: das.DefaultDataAvailabilityConfig, 
SyncMonitor: DefaultSyncMonitorConfig, Dangerous: DefaultDangerousConfig, - Archive: false, - TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second - Caching: execution.DefaultCachingConfig, TransactionStreamer: DefaultTransactionStreamerConfig, ResourceMgmt: resourcemanager.DefaultConfig, + Maintenance: DefaultMaintenanceConfig, } func ConfigDefaultL1Test() *Config { config := ConfigDefaultL1NonSequencerTest() - config.Sequencer = execution.TestSequencerConfig config.DelayedSequencer = TestDelayedSequencerConfig config.BatchPoster = TestBatchPosterConfig config.SeqCoordinator = TestSeqCoordinatorConfig + config.Sequencer = true + config.Dangerous.NoSequencerCoordinator = true return config } @@ -486,7 +449,6 @@ func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig - config.Sequencer.Enable = false config.DelayedSequencer.Enable = false config.BatchPoster.Enable = false config.SeqCoordinator.Enable = false @@ -494,15 +456,12 @@ func ConfigDefaultL1NonSequencerTest() *Config { config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" - config.Forwarder = execution.DefaultTestForwarderConfig - config.TransactionStreamer = DefaultTransactionStreamerConfig return &config } func ConfigDefaultL2Test() *Config { config := ConfigDefault - config.Sequencer = execution.TestSequencerConfig config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig config.Feed.Input.Verify.Dangerous.AcceptMissing = true @@ -518,21 +477,24 @@ func ConfigDefaultL2Test() *Config { } type DangerousConfig struct { - NoL1Listener bool `koanf:"no-l1-listener"` + NoL1Listener bool `koanf:"no-l1-listener"` + NoSequencerCoordinator bool `koanf:"no-sequencer-coordinator"` } var DefaultDangerousConfig = DangerousConfig{ - NoL1Listener: false, + NoL1Listener: false, + NoSequencerCoordinator: false, } func DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".no-l1-listener", DefaultDangerousConfig.NoL1Listener, "DANGEROUS! disables listening to L1. To be used in test nodes only") + f.Bool(prefix+".no-sequencer-coordinator", DefaultDangerousConfig.NoSequencerCoordinator, "DANGEROUS! 
allows sequencing without sequencer-coordinator") } type Node struct { ArbDB ethdb.Database Stack *node.Node - Execution *execution.ExecutionNode + Execution execution.FullExecutionClient L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses @@ -644,10 +606,10 @@ func StakerDataposter( func createNodeImpl( ctx context.Context, stack *node.Node, - chainDb ethdb.Database, + exec execution.FullExecutionClient, arbDb ethdb.Database, configFetcher ConfigFetcher, - l2BlockChain *core.BlockChain, + l2Config *params.ChainConfig, l1client arbutil.L1Interface, deployInfo *chaininfo.RollupAddresses, txOptsValidator *bind.TransactOpts, @@ -662,7 +624,6 @@ func createNodeImpl( return nil, err } - l2Config := l2BlockChain.Config() l2ChainId := l2Config.ChainID.Uint64() syncMonitor := NewSyncMonitor(&config.SyncMonitor) @@ -686,15 +647,6 @@ func createNodeImpl( } } - sequencerConfigFetcher := func() *execution.SequencerConfig { return &configFetcher.Get().Sequencer } - txprecheckConfigFetcher := func() *execution.TxPreCheckerConfig { return &configFetcher.Get().TxPreChecker } - exec, err := execution.CreateExecutionNode(stack, chainDb, l2BlockChain, l1Reader, syncMonitor, - config.ForwardingTargetF(), &config.Forwarder, config.RPC, &config.RecordingDatabase, - sequencerConfigFetcher, txprecheckConfigFetcher) - if err != nil { - return nil, err - } - var broadcastServer *broadcaster.Broadcaster if config.Feed.Output.Enable { var maybeDataSigner signature.DataSignerFunc @@ -708,7 +660,7 @@ func createNodeImpl( } transactionStreamerConfigFetcher := func() *TransactionStreamerConfig { return &configFetcher.Get().TransactionStreamer } - txStreamer, err := NewTransactionStreamer(arbDb, l2Config, exec.ExecEngine, broadcastServer, fatalErrChan, transactionStreamerConfigFetcher) + txStreamer, err := NewTransactionStreamer(arbDb, l2Config, exec, broadcastServer, fatalErrChan, transactionStreamerConfigFetcher) if err != nil { return nil, err } @@ -725,15 +677,15 @@ func createNodeImpl( } if config.SeqCoordinator.Enable { - coordinator, err = NewSeqCoordinator(dataSigner, bpVerifier, txStreamer, exec.Sequencer, syncMonitor, config.SeqCoordinator) + coordinator, err = NewSeqCoordinator(dataSigner, bpVerifier, txStreamer, exec, syncMonitor, config.SeqCoordinator) if err != nil { return nil, err } - } else if config.Sequencer.Enable && !config.Sequencer.Dangerous.NoCoordinator { - return nil, errors.New("sequencer must be enabled with coordinator, unless dangerous.no-coordinator set") + } else if config.Sequencer && !config.Dangerous.NoSequencerCoordinator { + return nil, errors.New("sequencer must be enabled with coordinator, unless dangerous.no-sequencer-coordinator set") } - dbs := []ethdb.Database{chainDb, arbDb} - maintenanceRunner, err := NewMaintenanceRunner(func() *MaintenanceConfig { return &configFetcher.Get().Maintenance }, coordinator, dbs) + dbs := []ethdb.Database{arbDb} + maintenanceRunner, err := NewMaintenanceRunner(func() *MaintenanceConfig { return &configFetcher.Get().Maintenance }, coordinator, dbs, exec) if err != nil { return nil, err } @@ -823,7 +775,7 @@ func createNodeImpl( } daReader = das.NewReaderPanicWrapper(daReader) } - } else if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee { + } else if l2Config.ArbitrumChainParams.DataAvailabilityCommittee { return nil, errors.New("a data availability service is required for this chain, but it was not configured") } @@ -843,7 +795,7 @@ func createNodeImpl( inboxReader, 
inboxTracker, txStreamer, - exec.Recorder, + exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), daReader, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, @@ -920,7 +872,7 @@ func createNodeImpl( } var confirmedNotifiers []staker.LatestConfirmedNotifier - if config.MessagePruner.Enable && !config.Caching.Archive { + if config.MessagePruner.Enable { messagePruner = NewMessagePruner(txStreamer, inboxTracker, func() *MessagePrunerConfig { return &configFetcher.Get().MessagePruner }) confirmedNotifiers = append(confirmedNotifiers, messagePruner) } @@ -951,13 +903,24 @@ func createNodeImpl( if txOptsBatchPoster == nil { return nil, errors.New("batchposter, but no TxOpts") } - batchPoster, err = NewBatchPoster(ctx, rawdb.NewTable(arbDb, storage.BatchPosterPrefix), l1Reader, inboxTracker, txStreamer, syncMonitor, func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, deployInfo, txOptsBatchPoster, daWriter) + batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ + DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), + L1Reader: l1Reader, + Inbox: inboxTracker, + Streamer: txStreamer, + SyncMonitor: syncMonitor, + Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, + DeployInfo: deployInfo, + TransactOpts: txOptsBatchPoster, + DAWriter: daWriter, + }) if err != nil { return nil, err } } + // always create DelayedSequencer, it won't do anything if it is disabled - delayedSequencer, err = NewDelayedSequencer(l1Reader, inboxReader, exec.ExecEngine, coordinator, func() *DelayedSequencerConfig { return &configFetcher.Get().DelayedSequencer }) + delayedSequencer, err = NewDelayedSequencer(l1Reader, inboxReader, exec, coordinator, func() *DelayedSequencerConfig { return &configFetcher.Get().DelayedSequencer }) if err != nil { return nil, err } @@ -997,10 +960,10 @@ func (n *Node) OnConfigReload(_ *Config, _ *Config) error { func CreateNode( ctx context.Context, stack *node.Node, - chainDb ethdb.Database, + exec execution.FullExecutionClient, arbDb ethdb.Database, configFetcher ConfigFetcher, - l2BlockChain *core.BlockChain, + l2Config *params.ChainConfig, l1client arbutil.L1Interface, deployInfo *chaininfo.RollupAddresses, txOptsValidator *bind.TransactOpts, @@ -1008,7 +971,7 @@ func CreateNode( dataSigner signature.DataSignerFunc, fatalErrChan chan error, ) (*Node, error) { - currentNode, err := createNodeImpl(ctx, stack, chainDb, arbDb, configFetcher, l2BlockChain, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan) + currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan) if err != nil { return nil, err } @@ -1026,44 +989,12 @@ func CreateNode( Namespace: "arbvalidator", Version: "1.0", Service: &BlockValidatorDebugAPI{ - val: currentNode.StatelessBlockValidator, - blockchain: l2BlockChain, + val: currentNode.StatelessBlockValidator, }, Public: false, }) } - apis = append(apis, rpc.API{ - Namespace: "arb", - Version: "1.0", - Service: execution.NewArbAPI(currentNode.Execution.TxPublisher), - Public: false, - }) - config := configFetcher.Get() - apis = append(apis, rpc.API{ - Namespace: "arbdebug", - Version: "1.0", - Service: execution.NewArbDebugAPI( - l2BlockChain, - config.RPC.ArbDebug.BlockRangeBound, - config.RPC.ArbDebug.TimeoutQueueBound, - ), - Public: false, - }) - apis = append(apis, rpc.API{ - Namespace: "arbtrace", - Version: "1.0", - Service: 
execution.NewArbTraceForwarderAPI( - config.RPC.ClassicRedirect, - config.RPC.ClassicRedirectTimeout, - ), - Public: false, - }) - apis = append(apis, rpc.API{ - Namespace: "debug", - Service: eth.NewDebugAPI(eth.NewArbEthereum(l2BlockChain, chainDb)), - Public: false, - }) stack.RegisterAPIs(apis) return currentNode, nil @@ -1072,19 +1003,24 @@ func CreateNode( func (n *Node) Start(ctx context.Context) error { // config is the static config at start, not a dynamic config config := n.configFetcher.Get() - n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator) - n.Execution.ArbInterface.Initialize(n) + execClient, ok := n.Execution.(*gethexec.ExecutionNode) + if !ok { + execClient = nil + } + if execClient != nil { + err := execClient.Initialize(ctx, n, n.SyncMonitor) + if err != nil { + return fmt.Errorf("error initializing exec client: %w", err) + } + } + n.SyncMonitor.Initialize(n.InboxReader, n.TxStreamer, n.SeqCoordinator, n.Execution) err := n.Stack.Start() if err != nil { return fmt.Errorf("error starting geth stack: %w", err) } - err = n.Execution.Backend.Start() + err = n.Execution.Start(ctx) if err != nil { - return fmt.Errorf("error starting geth backend: %w", err) - } - err = n.Execution.TxPublisher.Initialize(ctx) - if err != nil { - return fmt.Errorf("error initializing transaction publisher: %w", err) + return fmt.Errorf("error starting exec client: %w", err) } if n.InboxTracker != nil { err = n.InboxTracker.Initialize() @@ -1098,7 +1034,7 @@ func (n *Node) Start(ctx context.Context) error { return fmt.Errorf("error initializing feed broadcast server: %w", err) } } - if n.InboxTracker != nil && n.BroadcastServer != nil && config.Sequencer.Enable && !config.SeqCoordinator.Enable { + if n.InboxTracker != nil && n.BroadcastServer != nil && config.Sequencer && !config.SeqCoordinator.Enable { // Normally, the sequencer would populate the feed backlog when it acquires the lockout. // However, if the sequencer coordinator is not enabled, we must populate the backlog on startup. 
err = n.InboxTracker.PopulateFeedBacklog(n.BroadcastServer) @@ -1110,23 +1046,12 @@ func (n *Node) Start(ctx context.Context) error { if err != nil { return fmt.Errorf("error starting transaction streamer: %w", err) } - n.Execution.ExecEngine.Start(ctx) if n.InboxReader != nil { err = n.InboxReader.Start(ctx) if err != nil { return fmt.Errorf("error starting inbox reader: %w", err) } } - if n.DelayedSequencer != nil && n.SeqCoordinator == nil { - err = n.DelayedSequencer.ForceSequenceDelayed(ctx) - if err != nil { - return fmt.Errorf("error performing initial delayed sequencing: %w", err) - } - } - err = n.Execution.TxPublisher.Start(ctx) - if err != nil { - return fmt.Errorf("error starting transaction puiblisher: %w", err) - } if n.SeqCoordinator != nil { n.SeqCoordinator.Start(ctx) } @@ -1200,6 +1125,9 @@ func (n *Node) Start(ctx context.Context) error { } func (n *Node) StopAndWait() { + if n.Execution != nil { + n.Execution.StopAndWait() + } if n.MaintenanceRunner != nil && n.MaintenanceRunner.Started() { n.MaintenanceRunner.StopAndWait() } @@ -1212,9 +1140,6 @@ func (n *Node) StopAndWait() { n.SeqCoordinator.PrepareForShutdown() } n.Stack.StopRPC() // does nothing if not running - if n.Execution.TxPublisher.Started() { - n.Execution.TxPublisher.StopAndWait() - } if n.DelayedSequencer != nil && n.DelayedSequencer.Started() { n.DelayedSequencer.StopAndWait() } @@ -1239,7 +1164,6 @@ func (n *Node) StopAndWait() { if n.StatelessBlockValidator != nil { n.StatelessBlockValidator.Stop() } - n.Execution.Recorder.OrderlyShutdown() if n.InboxReader != nil && n.InboxReader.Started() { n.InboxReader.StopAndWait() } @@ -1249,17 +1173,10 @@ func (n *Node) StopAndWait() { if n.TxStreamer.Started() { n.TxStreamer.StopAndWait() } - if n.Execution.ExecEngine.Started() { - n.Execution.ExecEngine.StopAndWait() - } if n.SeqCoordinator != nil && n.SeqCoordinator.Started() { // Just stops the redis client (most other stuff was stopped earlier) n.SeqCoordinator.StopAndWait() } - n.Execution.ArbInterface.BlockChain().Stop() // does nothing if not running - if err := n.Execution.Backend.Stop(); err != nil { - log.Error("backend stop", "err", err) - } if n.DASLifecycleManager != nil { n.DASLifecycleManager.StopAndWaitUntil(2 * time.Second) } diff --git a/arbnode/redislock/redis.go b/arbnode/redislock/redis.go index c02476f04a..c8252e059f 100644 --- a/arbnode/redislock/redis.go +++ b/arbnode/redislock/redis.go @@ -42,7 +42,7 @@ func AddConfigOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".my-id", "", "this node's id prefix when acquiring the lock (optional)") f.Duration(prefix+".lockout-duration", DefaultCfg.LockoutDuration, "how long lock is held") f.Duration(prefix+".refresh-duration", DefaultCfg.RefreshDuration, "how long between consecutive calls to redis") - f.String(prefix+".key", prefix+".simple-lock-key", "key for lock") + f.String(prefix+".key", DefaultCfg.Key, "key for lock") f.Bool(prefix+".background-lock", DefaultCfg.BackgroundLock, "should node always try grabing lock in background") } diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index bd4e7a64ec..1e8405e01a 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -20,9 +20,9 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/arbmath" 
"github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/redisutil" @@ -41,7 +41,7 @@ type SeqCoordinator struct { sync *SyncMonitor streamer *TransactionStreamer - sequencer *execution.Sequencer + sequencer execution.ExecutionSequencer delayedSequencer *DelayedSequencer signer *signature.SignVerify config SeqCoordinatorConfig // warning: static, don't use for hot reloadable fields @@ -132,7 +132,14 @@ var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ Signer: signature.DefaultSignVerifyConfig, } -func NewSeqCoordinator(dataSigner signature.DataSignerFunc, bpvalidator *contracts.AddressVerifier, streamer *TransactionStreamer, sequencer *execution.Sequencer, sync *SyncMonitor, config SeqCoordinatorConfig) (*SeqCoordinator, error) { +func NewSeqCoordinator( + dataSigner signature.DataSignerFunc, + bpvalidator *contracts.AddressVerifier, + streamer *TransactionStreamer, + sequencer execution.ExecutionSequencer, + sync *SyncMonitor, + config SeqCoordinatorConfig, +) (*SeqCoordinator, error) { redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl) if err != nil { return nil, err diff --git a/arbnode/sync_monitor.go b/arbnode/sync_monitor.go index d01c300fa9..598ea4fb34 100644 --- a/arbnode/sync_monitor.go +++ b/arbnode/sync_monitor.go @@ -6,6 +6,7 @@ import ( "sync/atomic" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" flag "github.com/spf13/pflag" ) @@ -14,6 +15,7 @@ type SyncMonitor struct { inboxReader *InboxReader txStreamer *TransactionStreamer coordinator *SeqCoordinator + exec execution.FullExecutionClient initialized bool } @@ -41,10 +43,11 @@ func SyncMonitorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".coordinator-msg-lag", DefaultSyncMonitorConfig.CoordinatorMsgLag, "allowed lag between local and remote messages") } -func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator) { +func (s *SyncMonitor) Initialize(inboxReader *InboxReader, txStreamer *TransactionStreamer, coordinator *SeqCoordinator, exec execution.FullExecutionClient) { s.inboxReader = inboxReader s.txStreamer = txStreamer s.coordinator = coordinator + s.exec = exec s.initialized = true } @@ -64,13 +67,13 @@ func (s *SyncMonitor) SyncProgressMap() map[string]interface{} { } res["broadcasterQueuedMessagesPos"] = broadcasterQueuedMessagesPos - builtMessageCount, err := s.txStreamer.exec.HeadMessageNumber() + builtMessageCount, err := s.exec.HeadMessageNumber() if err != nil { - res["blockMessageToMessageCountError"] = err.Error() + res["builtMessageCountError"] = err.Error() syncing = true builtMessageCount = 0 } else { - blockNum := s.txStreamer.exec.MessageIndexToBlockNumber(builtMessageCount) + blockNum := s.exec.MessageIndexToBlockNumber(builtMessageCount) res["blockNum"] = blockNum builtMessageCount++ res["messageOfLastBlock"] = builtMessageCount @@ -150,7 +153,7 @@ func (s *SyncMonitor) SafeBlockNumber(ctx context.Context) (uint64, error) { if err != nil { return 0, err } - block := s.txStreamer.exec.MessageIndexToBlockNumber(msg - 1) + block := s.exec.MessageIndexToBlockNumber(msg - 1) return block, nil } @@ -162,7 +165,7 @@ func (s *SyncMonitor) FinalizedBlockNumber(ctx context.Context) (uint64, error) if err != nil { return 0, err } - block := s.txStreamer.exec.MessageIndexToBlockNumber(msg - 1) + block := s.exec.MessageIndexToBlockNumber(msg - 1) return block, nil } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go 
index 4d1e542db0..bcc389dc01 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -30,10 +30,10 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/sharedmetrics" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -45,7 +45,7 @@ type TransactionStreamer struct { stopwaiter.StopWaiter chainConfig *params.ChainConfig - exec *execution.ExecutionEngine + exec execution.ExecutionSequencer execLastMsgCount arbutil.MessageIndex validator *staker.BlockValidator @@ -98,7 +98,7 @@ func TransactionStreamerConfigAddOptions(prefix string, f *flag.FlagSet) { func NewTransactionStreamer( db ethdb.Database, chainConfig *params.ChainConfig, - exec *execution.ExecutionEngine, + exec execution.ExecutionSequencer, broadcastServer *broadcaster.Broadcaster, fatalErrChan chan<- error, config TransactionStreamerConfigFetcher, @@ -927,7 +927,7 @@ func (s *TransactionStreamer) ResultAtCount(count arbutil.MessageIndex) (*execut } // return value: true if should be called again immediately -func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec *execution.ExecutionEngine) bool { +func (s *TransactionStreamer) executeNextMsg(ctx context.Context, exec execution.ExecutionSequencer) bool { if ctx.Err() != nil { return false } diff --git a/arbutil/hash.go b/arbutil/hash.go new file mode 100644 index 0000000000..c6e91c8ebf --- /dev/null +++ b/arbutil/hash.go @@ -0,0 +1,26 @@ +package arbutil + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// PaddedKeccak256 pads each argument to 32 bytes, concatenates, and returns +// the keccak256 hash of the result. +func PaddedKeccak256(args ...[]byte) []byte { + var data []byte + for _, arg := range args { + data = append(data, common.BytesToHash(arg).Bytes()...) + } + return crypto.Keccak256(data) +} + +// SumBytes sums two byte slices and returns the result. +// If the sum overflows 32 bytes, it returns the last 32 bytes.
+func SumBytes(a, b []byte) []byte { + A := big.NewInt(0).SetBytes(a) + B := big.NewInt(0).SetBytes(b) + return common.BytesToHash((A.Add(A, B)).Bytes()).Bytes() +} diff --git a/arbutil/hash_test.go b/arbutil/hash_test.go new file mode 100644 index 0000000000..2b93353d08 --- /dev/null +++ b/arbutil/hash_test.go @@ -0,0 +1,83 @@ +package arbutil + +import ( + "bytes" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/google/go-cmp/cmp" +) + +func TestSlotAddress(t *testing.T) { + for _, tc := range []struct { + name string + args [][]byte + want []byte + }{ + { + name: "isBatchPoster[batchPosterAddr]", // Keccak256(addr, 3) + args: [][]byte{ + common.FromHex("0xC1b634853Cb333D3aD8663715b08f41A3Aec47cc"), // mainnet batch poster address + {3}, + }, + want: common.HexToHash("0xa10aa54071443520884ed767b0684edf43acec528b7da83ab38ce60126562660").Bytes(), + }, + { + name: "allowedContracts[msg.sender]", // Keccak256(msg.sender, 1) + args: [][]byte{ + common.FromHex("0x1c479675ad559DC151F6Ec7ed3FbF8ceE79582B6"), // mainnet sequencer address + {1}, + }, + want: common.HexToHash("0xe85fd79f89ff278fc57d40aecb7947873df9f0beac531c8f71a98f630e1eab62").Bytes(), + }, + { + name: "allowedRefundees[refundee]", // Keccak256(msg.sender, 2) + args: [][]byte{ + common.FromHex("0xC1b634853Cb333D3aD8663715b08f41A3Aec47cc"), // mainnet batch poster address + {2}, + }, + want: common.HexToHash("0x7686888b19bb7b75e46bb1aa328b65150743f4899443d722f0adf8e252ccda41").Bytes(), + }, + } { + t.Run(tc.name, func(t *testing.T) { + got := PaddedKeccak256(tc.args...) + if !bytes.Equal(got, tc.want) { + t.Errorf("slotAddress(%x) = %x, want %x", tc.args, got, tc.want) + } + }) + } + +} + +func TestSumBytes(t *testing.T) { + for _, tc := range []struct { + desc string + a, b, want []byte + }{ + { + desc: "simple case", + a: []byte{0x0a, 0x0b}, + b: []byte{0x03, 0x04}, + want: common.HexToHash("0x0d0f").Bytes(), + }, + { + desc: "carry over last byte", + a: []byte{0x0a, 0xff}, + b: []byte{0x01}, + want: common.HexToHash("0x0b00").Bytes(), + }, + { + desc: "overflow", + a: common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), + b: []byte{0x01}, + want: common.HexToHash("0x00").Bytes(), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + got := SumBytes(tc.a, tc.b) + if diff := cmp.Diff(got, tc.want); diff != "" { + t.Errorf("SumBytes(%x, %x) = %x want: %x", tc.a, tc.b, got, tc.want) + } + }) + } +} diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index 2649c88192..483b0b3b72 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -96,7 +96,7 @@ var DefaultConfig = Config{ RequireChainId: false, RequireFeedVersion: false, Verify: signature.DefultFeedVerifierConfig, - URL: []string{""}, + URL: []string{}, Timeout: 20 * time.Second, EnableCompression: true, } diff --git a/cmd/genericconf/config.go b/cmd/genericconf/config.go index 8e75b61772..c3282fe1af 100644 --- a/cmd/genericconf/config.go +++ b/cmd/genericconf/config.go @@ -33,7 +33,7 @@ func ConfConfigAddOptions(prefix string, f *flag.FlagSet) { var ConfConfigDefault = ConfConfig{ Dump: false, EnvPrefix: "", - File: nil, + File: []string{}, S3: DefaultS3Config, String: "", ReloadInterval: 0, diff --git a/cmd/genericconf/server.go b/cmd/genericconf/server.go index 53560dfdb0..3da027ab27 100644 --- a/cmd/genericconf/server.go +++ b/cmd/genericconf/server.go @@ -26,7 +26,7 @@ var HTTPConfigDefault = HTTPConfig{ Port: 8547, API: 
append(node.DefaultConfig.HTTPModules, "eth", "arb"), RPCPrefix: node.DefaultConfig.HTTPPathPrefix, - CORSDomain: node.DefaultConfig.HTTPCors, + CORSDomain: []string{}, VHosts: node.DefaultConfig.HTTPVirtualHosts, ServerTimeouts: HTTPServerTimeoutConfigDefault, } @@ -91,7 +91,7 @@ var WSConfigDefault = WSConfig{ Port: 8548, API: append(node.DefaultConfig.WSModules, "eth", "arb"), RPCPrefix: node.DefaultConfig.WSPathPrefix, - Origins: node.DefaultConfig.WSOrigins, + Origins: []string{}, ExposeAll: node.DefaultConfig.WSExposeAll, } @@ -137,7 +137,7 @@ type GraphQLConfig struct { var GraphQLConfigDefault = GraphQLConfig{ Enable: false, - CORSDomain: node.DefaultConfig.GraphQLCors, + CORSDomain: []string{}, VHosts: node.DefaultConfig.GraphQLVirtualHosts, } diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index a461a36900..43b1c1d206 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -122,7 +122,7 @@ func mainImpl() int { } if err := startMetrics(nodeConfig); err != nil { - log.Error("Starting metrics: %v", err) + log.Error("Error starting metrics", "error", err) return 1 } diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index 4b99b798ee..ea04d4eb1f 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -15,30 +15,49 @@ import ( "time" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/testhelpers" + + "github.com/r3labs/diff/v3" + flag "github.com/spf13/pflag" ) +func TestEmptyCliConfig(t *testing.T) { + f := flag.NewFlagSet("", flag.ContinueOnError) + NodeConfigAddOptions(f) + k, err := confighelpers.BeginCommonParse(f, []string{}) + Require(t, err) + var emptyCliNodeConfig NodeConfig + err = confighelpers.EndCommonParse(k, &emptyCliNodeConfig) + Require(t, err) + if !reflect.DeepEqual(emptyCliNodeConfig, NodeConfigDefault) { + changelog, err := diff.Diff(emptyCliNodeConfig, NodeConfigDefault) + Require(t, err) + Fail(t, "empty cli config differs from expected default", changelog) + } +} + func TestSeqConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") _, _, _, err := ParseNode(context.Background(), args) Require(t, err) } func TestUnsafeStakerConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --node.forwarding-target null --node.staker.dangerous.without-block-validator", " ") + args := strings.Split("--persistent.chain /tmp/data --init.dev-init 
--node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --execution.forwarding-target null --node.staker.dangerous.without-block-validator", " ") _, _, _, err := ParseNode(context.Background(), args) Require(t, err) } func TestValidatorConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --node.forwarding-target null", " ") + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.staker.enable --node.staker.strategy MakeNodes --node.staker.staker-interval 10s --execution.forwarding-target null", " ") _, _, _, err := ParseNode(context.Background(), args) Require(t, err) } func TestAggregatorConfig(t *testing.T) { - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642 --node.data-availability.enable --node.data-availability.rpc-aggregator.backends {[\"url\":\"http://localhost:8547\",\"pubkey\":\"abc==\",\"signerMask\":0x1]}", " ") + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642 --node.data-availability.enable --node.data-availability.rpc-aggregator.backends {[\"url\":\"http://localhost:8547\",\"pubkey\":\"abc==\",\"signerMask\":0x1]}", " ") _, _, _, err := ParseNode(context.Background(), args) Require(t, err) } @@ -69,13 +88,14 @@ func TestReloads(t *testing.T) { config := NodeConfigDefault update := NodeConfigDefault - update.Node.Sequencer.MaxBlockSpeed++ + update.Node.BatchPoster.MaxSize++ check(reflect.ValueOf(config), false, "config") Require(t, config.CanReload(&config)) Require(t, config.CanReload(&update)) testUnsafe := func() { + t.Helper() if config.CanReload(&update) == nil { Fail(t, "failed to detect unsafe reload") } @@ -87,7 +107,7 @@ func TestReloads(t *testing.T) { testUnsafe() update.ParentChain.ID++ testUnsafe() - update.Node.Sequencer.Forwarder.ConnectionTimeout++ + update.Node.Staker.Enable = !update.Node.Staker.Enable testUnsafe() } @@ -100,7 +120,7 @@ func TestLiveNodeConfig(t *testing.T) { jsonConfig := "{\"chain\":{\"id\":421613}}" Require(t, WriteToConfigFile(configFile, jsonConfig)) - args := strings.Split("--file-logging.enable=false --persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --parent-chain.wallet.pathname /l1keystore 
--parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") + args := strings.Split("--file-logging.enable=false --persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") args = append(args, []string{"--conf.file", configFile}...) config, _, _, err := ParseNode(context.Background(), args) Require(t, err) @@ -113,8 +133,8 @@ func TestLiveNodeConfig(t *testing.T) { // check updating the config update := config.ShallowClone() expected := config.ShallowClone() - update.Node.Sequencer.MaxBlockSpeed += 2 * time.Millisecond - expected.Node.Sequencer.MaxBlockSpeed += 2 * time.Millisecond + update.Node.BatchPoster.MaxSize += 100 + expected.Node.BatchPoster.MaxSize += 100 Require(t, liveConfig.Set(update)) if !reflect.DeepEqual(liveConfig.Get(), expected) { Fail(t, "failed to set config") @@ -149,19 +169,19 @@ func TestLiveNodeConfig(t *testing.T) { // change the config file expected = config.ShallowClone() - expected.Node.Sequencer.MaxBlockSpeed += time.Millisecond - jsonConfig = fmt.Sprintf("{\"node\":{\"sequencer\":{\"max-block-speed\":\"%s\"}}, \"chain\":{\"id\":421613}}", expected.Node.Sequencer.MaxBlockSpeed.String()) + expected.Node.BatchPoster.MaxSize += 100 + jsonConfig = fmt.Sprintf("{\"node\":{\"batch-poster\":{\"max-size\":\"%d\"}}, \"chain\":{\"id\":421613}}", expected.Node.BatchPoster.MaxSize) Require(t, WriteToConfigFile(configFile, jsonConfig)) // trigger LiveConfig reload Require(t, syscall.Kill(syscall.Getpid(), syscall.SIGUSR1)) if !PollLiveConfigUntilEqual(liveConfig, expected) { - Fail(t, "failed to update config", config.Node.Sequencer.MaxBlockSpeed, update.Node.Sequencer.MaxBlockSpeed) + Fail(t, "failed to update config", config.Node.BatchPoster.MaxSize, update.Node.BatchPoster.MaxSize) } // change chain.id in the config file (currently non-reloadable) - jsonConfig = fmt.Sprintf("{\"node\":{\"sequencer\":{\"max-block-speed\":\"%s\"}}, \"chain\":{\"id\":421703}}", expected.Node.Sequencer.MaxBlockSpeed.String()) + jsonConfig = fmt.Sprintf("{\"node\":{\"batch-poster\":{\"max-size\":\"%d\"}}, \"chain\":{\"id\":421703}}", expected.Node.BatchPoster.MaxSize) Require(t, WriteToConfigFile(configFile, jsonConfig)) // trigger LiveConfig reload @@ -181,7 +201,7 @@ func TestPeriodicReloadOfLiveNodeConfig(t *testing.T) { jsonConfig := "{\"conf\":{\"reload-interval\":\"20ms\"}}" Require(t, WriteToConfigFile(configFile, jsonConfig)) - args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") + args := strings.Split("--persistent.chain /tmp/data --init.dev-init --node.parent-chain-reader.enable=false --parent-chain.id 5 --chain.id 421613 --parent-chain.wallet.pathname /l1keystore --parent-chain.wallet.password passphrase --http.addr 0.0.0.0 --ws.addr 0.0.0.0 --node.sequencer --execution.sequencer.enable --node.feed.output.enable --node.feed.output.port 9642", " ") args = append(args, []string{"--conf.file", configFile}...) 
config, _, _, err := ParseNode(context.Background(), args) Require(t, err) diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index bdba7c1210..bef0f83d1f 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -34,12 +34,12 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/ipfshelper" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/statetransfer" "github.com/spf13/pflag" @@ -268,7 +268,7 @@ var hashListRegex = regexp.MustCompile("^(0x)?[0-9a-fA-F]{64}(,(0x)?[0-9a-fA-F]{ // Finds important roots to retain while proving func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node.Node, nodeConfig *NodeConfig, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) ([]common.Hash, error) { initConfig := &nodeConfig.Init - chainConfig := execution.TryReadStoredChainConfig(chainDb) + chainConfig := gethexec.TryReadStoredChainConfig(chainDb) if chainConfig == nil { return nil, errors.New("database doesn't have a chain config (was this node initialized?)") } @@ -440,9 +440,9 @@ func pruneChainDb(ctx context.Context, chainDb ethdb.Database, stack *node.Node, func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeConfig, chainId *big.Int, cacheConfig *core.CacheConfig, l1Client arbutil.L1Interface, rollupAddrs chaininfo.RollupAddresses) (ethdb.Database, *core.BlockChain, error) { if !config.Init.Force { if readOnlyDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", 0, 0, "", "", true); err == nil { - if chainConfig := execution.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { + if chainConfig := gethexec.TryReadStoredChainConfig(readOnlyDb); chainConfig != nil { readOnlyDb.Close() - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Node.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "", false) + chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "", false) if err != nil { return chainDb, nil, err } @@ -450,7 +450,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, fmt.Errorf("error pruning: %w", err) } - l2BlockChain, err := execution.GetBlockChain(chainDb, cacheConfig, chainConfig, config.Node.TxLookupLimit) + l2BlockChain, err := gethexec.GetBlockChain(chainDb, cacheConfig, chainConfig, config.Execution.TxLookupLimit) if err != nil { return chainDb, nil, err } @@ -487,7 +487,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo var initDataReader statetransfer.InitDataReader = nil - chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Node.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "", false) + chainDb, err := stack.OpenDatabaseWithFreezer("l2chaindata", config.Execution.Caching.DatabaseCache, config.Persistent.Handles, config.Persistent.Ancient, "", false) if err != nil { return chainDb, nil, err } @@ -529,11 +529,11 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config 
*NodeCo var l2BlockChain *core.BlockChain txIndexWg := sync.WaitGroup{} if initDataReader == nil { - chainConfig = execution.TryReadStoredChainConfig(chainDb) + chainConfig = gethexec.TryReadStoredChainConfig(chainDb) if chainConfig == nil { return chainDb, nil, errors.New("no --init.* mode supplied and chain data not in expected directory") } - l2BlockChain, err = execution.GetBlockChain(chainDb, cacheConfig, chainConfig, config.Node.TxLookupLimit) + l2BlockChain, err = gethexec.GetBlockChain(chainDb, cacheConfig, chainConfig, config.Execution.TxLookupLimit) if err != nil { return chainDb, nil, err } @@ -631,7 +631,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo log.Warn("Created fake init message as L1Reader is disabled and serialized chain config from init message is not available", "json", string(serializedChainConfig)) } - l2BlockChain, err = execution.WriteOrTestBlockChain(chainDb, cacheConfig, initDataReader, chainConfig, parsedInitMessage, config.Node.TxLookupLimit, config.Init.AccountsPerSync) + l2BlockChain, err = gethexec.WriteOrTestBlockChain(chainDb, cacheConfig, initDataReader, chainConfig, parsedInitMessage, config.Execution.TxLookupLimit, config.Init.AccountsPerSync) if err != nil { return chainDb, nil, err } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 56e93b6d99..285cc3fe86 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -41,7 +41,6 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -49,6 +48,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" + "github.com/offchainlabs/nitro/execution/gethexec" _ "github.com/offchainlabs/nitro/nodeInterface" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/staker" @@ -205,10 +205,6 @@ func mainImpl() int { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 } - if nodeConfig.Node.Archive { - log.Warn("--node.archive has been deprecated. 
Please use --node.caching.archive instead.") - nodeConfig.Node.Caching.Archive = true - } log.Info("Running Arbitrum nitro node", "revision", vcsRevision, "vcs.time", vcsTime) @@ -220,25 +216,19 @@ func mainImpl() int { nodeConfig.Node.ParentChainReader.Enable = true } - if nodeConfig.Node.Sequencer.Enable { - if nodeConfig.Node.ForwardingTargetF() != "" { - flag.Usage() - log.Crit("forwarding-target cannot be set when sequencer is enabled") - } - if nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { - flag.Usage() - log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") - } - } else if nodeConfig.Node.ForwardingTarget == "" { + if nodeConfig.Execution.Sequencer.Enable && nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { flag.Usage() - log.Crit("forwarding-target unset, and not sequencer (can set to \"null\" to disable forwarding)") + log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") + } + if nodeConfig.Execution.Sequencer.Enable != nodeConfig.Node.Sequencer { + log.Error("consensus and execution must agree if sequencing is enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer) } var l1TransactionOpts *bind.TransactOpts var dataSigner signature.DataSignerFunc var l1TransactionOptsValidator *bind.TransactOpts var l1TransactionOptsBatchPoster *bind.TransactOpts - sequencerNeedsKey := (nodeConfig.Node.Sequencer.Enable && !nodeConfig.Node.Feed.Output.DisableSigning) || nodeConfig.Node.BatchPoster.Enable + sequencerNeedsKey := (nodeConfig.Node.Sequencer && !nodeConfig.Node.Feed.Output.DisableSigning) || nodeConfig.Node.BatchPoster.Enable validatorNeedsKey := nodeConfig.Node.Staker.OnlyCreateWalletContract || nodeConfig.Node.Staker.Enable && !strings.EqualFold(nodeConfig.Node.Staker.Strategy, "watchtower") l1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) @@ -315,11 +305,11 @@ func mainImpl() int { } } - if nodeConfig.Node.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { - if nodeConfig.Node.Archive { - nodeConfig.Node.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth + if nodeConfig.Execution.RPC.MaxRecreateStateDepth == arbitrum.UninitializedMaxRecreateStateDepth { + if nodeConfig.Execution.Caching.Archive { + nodeConfig.Execution.RPC.MaxRecreateStateDepth = arbitrum.DefaultArchiveNodeMaxRecreateStateDepth } else { - nodeConfig.Node.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth + nodeConfig.Execution.RPC.MaxRecreateStateDepth = arbitrum.DefaultNonArchiveNodeMaxRecreateStateDepth } } liveNodeConfig := genericconf.NewLiveConfig[*NodeConfig](args, nodeConfig, func(ctx context.Context, args []string) (*NodeConfig, error) { @@ -377,9 +367,9 @@ func mainImpl() int { return 0 } - if nodeConfig.Node.Caching.Archive && nodeConfig.Node.TxLookupLimit != 0 { + if nodeConfig.Execution.Caching.Archive && nodeConfig.Execution.TxLookupLimit != 0 { log.Info("retaining ability to lookup full transaction history as archive mode is enabled") - nodeConfig.Node.TxLookupLimit = 0 + nodeConfig.Execution.TxLookupLimit = 0 } if err := resourcemanager.Init(&nodeConfig.Node.ResourceMgmt); err != nil { @@ -409,7 +399,7 @@ func mainImpl() int { } if err := startMetrics(nodeConfig); err != nil { - log.Error("Starting metrics: %v", err) + log.Error("Error starting metrics", "error", err) return 1 } @@ -420,7 +410,7 @@ func mainImpl() int { } 
}() - chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), gethexec.DefaultCacheConfigFor(stack, &nodeConfig.Execution.Caching), l1Client, rollupAddrs) if l2BlockChain != nil { deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } @@ -463,13 +453,26 @@ func mainImpl() int { } } - currentNode, err := arbnode.CreateNode( + execNode, err := gethexec.CreateExecutionNode( ctx, stack, chainDb, + l2BlockChain, + l1Client, + func() *gethexec.Config { return &liveNodeConfig.Get().Execution }, + ) + if err != nil { + log.Error("failed to create execution node", "err", err) + return 1 + } + + currentNode, err := arbnode.CreateNode( + ctx, + stack, + execNode, arbDb, &NodeConfigFetcher{liveNodeConfig}, - l2BlockChain, + l2BlockChain.Config(), l1Client, &rollupAddrs, l1TransactionOptsValidator, @@ -505,7 +508,7 @@ func mainImpl() int { } gqlConf := nodeConfig.GraphQL if gqlConf.Enable { - if err := graphql.New(stack, currentNode.Execution.Backend.APIBackend(), currentNode.Execution.FilterSystem, gqlConf.CORSDomain, gqlConf.VHosts); err != nil { + if err := graphql.New(stack, execNode.Backend.APIBackend(), execNode.FilterSystem, gqlConf.CORSDomain, gqlConf.VHosts); err != nil { log.Error("failed to register the GraphQL service", "err", err) return 1 } @@ -564,6 +567,7 @@ func mainImpl() int { type NodeConfig struct { Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` Node arbnode.Config `koanf:"node" reload:"hot"` + Execution gethexec.Config `koanf:"execution" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` Chain conf.L2Config `koanf:"chain"` @@ -587,16 +591,23 @@ type NodeConfig struct { var NodeConfigDefault = NodeConfig{ Conf: genericconf.ConfConfigDefault, Node: arbnode.ConfigDefault, + Execution: gethexec.ConfigDefault, + Validation: valnode.DefaultValidationConfig, ParentChain: conf.L1ConfigDefault, Chain: conf.L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", + FileLogging: genericconf.DefaultFileLoggingConfig, Persistent: conf.PersistentConfigDefault, HTTP: genericconf.HTTPConfigDefault, WS: genericconf.WSConfigDefault, IPC: genericconf.IPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, + GraphQL: genericconf.GraphQLConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, + Init: InitConfigDefault, + Rpc: genericconf.DefaultRpcConfig, PProf: false, PprofCfg: genericconf.PProfDefault, } @@ -604,6 +615,7 @@ var NodeConfigDefault = NodeConfig{ func NodeConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) arbnode.ConfigAddOptions("node", f, true, true) + gethexec.ConfigAddOptions("execution", f) valnode.ValidationConfigAddOptions("validation", f) conf.L1ConfigAddOptions("parent-chain", f) conf.L2ConfigAddOptions("chain", f) @@ -757,6 +769,9 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa nodeConfig.ParentChain.Wallet = genericconf.WalletConfigDefault nodeConfig.Chain.DevWallet = genericconf.WalletConfigDefault + if nodeConfig.Execution.Caching.Archive { + nodeConfig.Node.MessagePruner.Enable = false + } err = nodeConfig.Validate() if err != nil { return nil, nil, nil, err @@ -794,7 +809,7 @@ func 
applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c "parent-chain.id": chainInfo.ParentChainId, } if chainInfo.SequencerUrl != "" { - chainDefaults["node.forwarding-target"] = chainInfo.SequencerUrl + chainDefaults["execution.forwarding-target"] = chainInfo.SequencerUrl } if chainInfo.FeedUrl != "" { chainDefaults["node.feed.input.url"] = chainInfo.FeedUrl @@ -808,7 +823,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c chainDefaults["init.empty"] = true } if parentChainIsArbitrum { - l2MaxTxSize := execution.DefaultSequencerConfig.MaxTxDataSize + l2MaxTxSize := gethexec.DefaultSequencerConfig.MaxTxDataSize bufferSpace := 5000 if l2MaxTxSize < bufferSpace*2 { return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace) diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 4f9a8b2ea1..18a2b10f2f 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -150,7 +150,7 @@ func BeginCommonParse(f *flag.FlagSet, args []string) (*koanf.Koanf, error) { if f.NArg() != 0 { // Unexpected number of parameters - return nil, errors.New("unexpected number of parameters") + return nil, fmt.Errorf("unexpected parameter: %s", f.Arg(0)) } var k = koanf.New(".") diff --git a/das/das.go b/das/das.go index 208a12cc83..9133b73ea4 100644 --- a/das/das.go +++ b/das/das.go @@ -69,6 +69,7 @@ var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ RestAggregator: DefaultRestfulClientAggregatorConfig, ParentChainConnectionAttempts: 15, PanicOnError: false, + IpfsStorage: DefaultIpfsStorageServiceConfig, } func OptionalAddressFromString(s string) (*common.Address, error) { diff --git a/arbnode/execution/api.go b/execution/gethexec/api.go similarity index 99% rename from arbnode/execution/api.go rename to execution/gethexec/api.go index 5245f74f34..c19072ae77 100644 --- a/arbnode/execution/api.go +++ b/execution/gethexec/api.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package execution +package gethexec import ( "context" diff --git a/arbnode/execution/arb_interface.go b/execution/gethexec/arb_interface.go similarity index 98% rename from arbnode/execution/arb_interface.go rename to execution/gethexec/arb_interface.go index 21eca96077..50d7dfb891 100644 --- a/arbnode/execution/arb_interface.go +++ b/execution/gethexec/arb_interface.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package execution +package gethexec import ( "context" diff --git a/arbnode/execution/block_recorder.go b/execution/gethexec/block_recorder.go similarity index 97% rename from arbnode/execution/block_recorder.go rename to execution/gethexec/block_recorder.go index dc5daa6f7b..a0f6d837e4 100644 --- a/arbnode/execution/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -1,4 +1,4 @@ -package execution +package gethexec import ( "context" @@ -15,6 +15,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/validator" ) @@ -39,13 +40,6 @@ type BlockRecorder struct { preparedLock sync.Mutex } -type RecordResult struct { - Pos arbutil.MessageIndex - BlockHash common.Hash - Preimages map[common.Hash][]byte - BatchInfo []validator.BatchInfo -} - func NewBlockRecorder(config *arbitrum.RecordingDatabaseConfig, execEngine *ExecutionEngine, ethDb ethdb.Database) *BlockRecorder { recorder := &BlockRecorder{ execEngine: execEngine, @@ -77,7 +71,7 @@ func (r *BlockRecorder) RecordBlockCreation( ctx context.Context, pos arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, -) (*RecordResult, error) { +) (*execution.RecordResult, error) { blockNum := r.execEngine.MessageIndexToBlockNumber(pos) @@ -172,7 +166,12 @@ func (r *BlockRecorder) RecordBlockCreation( r.updateLastHdr(prevHeader) r.updateValidCandidateHdr(prevHeader) - return &RecordResult{pos, blockHash, preimages, readBatchInfo}, err + return &execution.RecordResult{ + Pos: pos, + BlockHash: blockHash, + Preimages: preimages, + BatchInfo: readBatchInfo, + }, err } func (r *BlockRecorder) updateLastHdr(hdr *types.Header) { diff --git a/arbnode/execution/blockchain.go b/execution/gethexec/blockchain.go similarity index 99% rename from arbnode/execution/blockchain.go rename to execution/gethexec/blockchain.go index 00a59f068b..9e1ee0c30a 100644 --- a/arbnode/execution/blockchain.go +++ b/execution/gethexec/blockchain.go @@ -1,4 +1,4 @@ -package execution +package gethexec import ( "errors" diff --git a/arbnode/execution/executionengine.go b/execution/gethexec/executionengine.go similarity index 95% rename from arbnode/execution/executionengine.go rename to execution/gethexec/executionengine.go index da01e27983..58e91a197e 100644 --- a/arbnode/execution/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -1,4 +1,4 @@ -package execution +package gethexec import ( "context" @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -20,21 +19,16 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/sharedmetrics" "github.com/offchainlabs/nitro/util/stopwaiter" ) -type TransactionStreamerInterface interface { - WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error - ExpectChosenSequencer() error - FetchBatch(batchNum uint64) ([]byte, error) -} - type ExecutionEngine struct { stopwaiter.StopWaiter bc *core.BlockChain - streamer TransactionStreamerInterface + streamer execution.TransactionStreamer recorder 
*BlockRecorder resequenceChan chan []*arbostypes.MessageWithMetadata @@ -77,7 +71,7 @@ func (s *ExecutionEngine) EnableReorgSequencing() { s.reorgSequencing = true } -func (s *ExecutionEngine) SetTransactionStreamer(streamer TransactionStreamerInterface) { +func (s *ExecutionEngine) SetTransactionStreamer(streamer execution.TransactionStreamer) { if s.Started() { panic("trying to set transaction streamer after start") } @@ -245,15 +239,13 @@ func (s *ExecutionEngine) resequenceReorgedMessages(messages []*arbostypes.Messa } } -var ErrSequencerInsertLockTaken = errors.New("insert lock taken") - func (s *ExecutionEngine) sequencerWrapper(sequencerFunc func() (*types.Block, error)) (*types.Block, error) { attempts := 0 for { s.createBlocksMutex.Lock() block, err := sequencerFunc() s.createBlocksMutex.Unlock() - if !errors.Is(err, ErrSequencerInsertLockTaken) { + if !errors.Is(err, execution.ErrSequencerInsertLockTaken) { return block, err } // We got SequencerInsertLockTaken @@ -479,20 +471,18 @@ func (s *ExecutionEngine) appendBlock(block *types.Block, statedb *state.StateDB return nil } -type MessageResult struct { - BlockHash common.Hash - SendRoot common.Hash -} - -func (s *ExecutionEngine) resultFromHeader(header *types.Header) (*MessageResult, error) { +func (s *ExecutionEngine) resultFromHeader(header *types.Header) (*execution.MessageResult, error) { if header == nil { return nil, fmt.Errorf("result not found") } info := types.DeserializeHeaderExtraInformation(header) - return &MessageResult{header.Hash(), info.SendRoot}, nil + return &execution.MessageResult{ + BlockHash: header.Hash(), + SendRoot: info.SendRoot, + }, nil } -func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*MessageResult, error) { +func (s *ExecutionEngine) ResultAtPos(pos arbutil.MessageIndex) (*execution.MessageResult, error) { return s.resultFromHeader(s.bc.GetHeaderByNumber(s.MessageIndexToBlockNumber(pos))) } diff --git a/arbnode/execution/forwarder.go b/execution/gethexec/forwarder.go similarity index 99% rename from arbnode/execution/forwarder.go rename to execution/gethexec/forwarder.go index 5d6938be22..7dea24b417 100644 --- a/arbnode/execution/forwarder.go +++ b/execution/gethexec/forwarder.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package execution +package gethexec import ( "context" diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go new file mode 100644 index 0000000000..1068dda967 --- /dev/null +++ b/execution/gethexec/node.go @@ -0,0 +1,367 @@ +package gethexec + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "testing" + + "github.com/ethereum/go-ethereum/arbitrum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/filters" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/headerreader" + flag "github.com/spf13/pflag" +) + +type DangerousConfig struct { + ReorgToBlock int64 `koanf:"reorg-to-block"` +} + +var DefaultDangerousConfig = DangerousConfig{ + ReorgToBlock: -1, +} + +func DangerousConfigAddOptions(prefix string, f *flag.FlagSet) { + f.Int64(prefix+".reorg-to-block", DefaultDangerousConfig.ReorgToBlock, "DANGEROUS! forces a reorg to an old block height. To be used for testing only. -1 to disable") +} + +type Config struct { + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + Sequencer SequencerConfig `koanf:"sequencer" reload:"hot"` + RecordingDatabase arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` + TxPreChecker TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` + Forwarder ForwarderConfig `koanf:"forwarder"` + ForwardingTarget string `koanf:"forwarding-target"` + Caching CachingConfig `koanf:"caching"` + RPC arbitrum.Config `koanf:"rpc"` + TxLookupLimit uint64 `koanf:"tx-lookup-limit"` + Dangerous DangerousConfig `koanf:"dangerous"` + + forwardingTarget string +} + +func (c *Config) Validate() error { + if err := c.Sequencer.Validate(); err != nil { + return err + } + if !c.Sequencer.Enable && c.ForwardingTarget == "" { + return errors.New("ForwardingTarget not set and not sequencer (can use \"null\")") + } + if c.ForwardingTarget == "null" { + c.forwardingTarget = "" + } else { + c.forwardingTarget = c.ForwardingTarget + } + if c.forwardingTarget != "" && c.Sequencer.Enable { + return errors.New("ForwardingTarget set and sequencer enabled") + } + return nil +} + +func ConfigAddOptions(prefix string, f *flag.FlagSet) { + arbitrum.ConfigAddOptions(prefix+".rpc", f) + SequencerConfigAddOptions(prefix+".sequencer", f) + arbitrum.RecordingDatabaseConfigAddOptions(prefix+".recording-database", f) + f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") + AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) + TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) + CachingConfigAddOptions(prefix+".caching", f) + f.Uint64(prefix+".tx-lookup-limit", ConfigDefault.TxLookupLimit, "retain the ability to lookup transactions by hash for the past N blocks (0 = all blocks)") + DangerousConfigAddOptions(prefix+".dangerous", f) +} + +var ConfigDefault = Config{ + RPC: arbitrum.DefaultConfig, + Sequencer: DefaultSequencerConfig, + RecordingDatabase: 
arbitrum.DefaultRecordingDatabaseConfig, + ForwardingTarget: "", + TxPreChecker: DefaultTxPreCheckerConfig, + TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second + Caching: DefaultCachingConfig, + Dangerous: DefaultDangerousConfig, + Forwarder: DefaultNodeForwarderConfig, +} + +func ConfigDefaultNonSequencerTest() *Config { + config := ConfigDefault + config.Sequencer.Enable = false + config.Forwarder = DefaultTestForwarderConfig + config.ForwardingTarget = "null" + + _ = config.Validate() + + return &config +} + +func ConfigDefaultTest() *Config { + config := ConfigDefault + config.Sequencer = TestSequencerConfig + config.ForwardingTarget = "null" + + _ = config.Validate() + + return &config +} + +type ConfigFetcher func() *Config + +type ExecutionNode struct { + ChainDB ethdb.Database + Backend *arbitrum.Backend + FilterSystem *filters.FilterSystem + ArbInterface *ArbInterface + ExecEngine *ExecutionEngine + Recorder *BlockRecorder + Sequencer *Sequencer // either nil or same as TxPublisher + TxPublisher TransactionPublisher + ConfigFetcher ConfigFetcher + ParentChainReader *headerreader.HeaderReader + started atomic.Bool +} + +func CreateExecutionNode( + ctx context.Context, + stack *node.Node, + chainDB ethdb.Database, + l2BlockChain *core.BlockChain, + l1client arbutil.L1Interface, + configFetcher ConfigFetcher, +) (*ExecutionNode, error) { + config := configFetcher() + execEngine, err := NewExecutionEngine(l2BlockChain) + if err != nil { + return nil, err + } + recorder := NewBlockRecorder(&config.RecordingDatabase, execEngine, chainDB) + var txPublisher TransactionPublisher + var sequencer *Sequencer + + var parentChainReader *headerreader.HeaderReader + if l1client != nil { + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + parentChainReader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher().ParentChainReader }, arbSys) + if err != nil { + return nil, err + } + } + + if config.Sequencer.Enable { + seqConfigFetcher := func() *SequencerConfig { return &configFetcher().Sequencer } + sequencer, err = NewSequencer(execEngine, parentChainReader, seqConfigFetcher) + if err != nil { + return nil, err + } + txPublisher = sequencer + } else { + if config.Forwarder.RedisUrl != "" { + txPublisher = NewRedisTxForwarder(config.forwardingTarget, &config.Forwarder) + } else if config.forwardingTarget == "" { + txPublisher = NewTxDropper() + } else { + txPublisher = NewForwarder(config.forwardingTarget, &config.Forwarder) + } + } + + txprecheckConfigFetcher := func() *TxPreCheckerConfig { return &configFetcher().TxPreChecker } + + txPublisher = NewTxPreChecker(txPublisher, l2BlockChain, txprecheckConfigFetcher) + arbInterface, err := NewArbInterface(execEngine, txPublisher) + if err != nil { + return nil, err + } + filterConfig := filters.Config{ + LogCacheSize: config.RPC.FilterLogCacheSize, + Timeout: config.RPC.FilterTimeout, + } + backend, filterSystem, err := arbitrum.NewBackend(stack, &config.RPC, chainDB, arbInterface, filterConfig) + if err != nil { + return nil, err + } + + apis := []rpc.API{{ + Namespace: "arb", + Version: "1.0", + Service: NewArbAPI(txPublisher), + Public: false, + }} + apis = append(apis, rpc.API{ + Namespace: "arbdebug", + Version: "1.0", + Service: NewArbDebugAPI( + l2BlockChain, + config.RPC.ArbDebug.BlockRangeBound, + config.RPC.ArbDebug.TimeoutQueueBound, + ), + Public: false, + }) + apis = append(apis, rpc.API{ + Namespace: "arbtrace", + Version: "1.0", + Service: NewArbTraceForwarderAPI( + 
config.RPC.ClassicRedirect, + config.RPC.ClassicRedirectTimeout, + ), + Public: false, + }) + apis = append(apis, rpc.API{ + Namespace: "debug", + Service: eth.NewDebugAPI(eth.NewArbEthereum(l2BlockChain, chainDB)), + Public: false, + }) + + stack.RegisterAPIs(apis) + + return &ExecutionNode{ + ChainDB: chainDB, + Backend: backend, + FilterSystem: filterSystem, + ArbInterface: arbInterface, + ExecEngine: execEngine, + Recorder: recorder, + Sequencer: sequencer, + TxPublisher: txPublisher, + ConfigFetcher: configFetcher, + ParentChainReader: parentChainReader, + }, nil + +} + +func (n *ExecutionNode) Initialize(ctx context.Context, arbnode interface{}, sync arbitrum.SyncProgressBackend) error { + n.ArbInterface.Initialize(n) + err := n.Backend.Start() + if err != nil { + return fmt.Errorf("error starting geth backend: %w", err) + } + err = n.TxPublisher.Initialize(ctx) + if err != nil { + return fmt.Errorf("error initializing transaction publisher: %w", err) + } + err = n.Backend.APIBackend().SetSyncBackend(sync) + if err != nil { + return fmt.Errorf("error setting sync backend: %w", err) + } + return nil +} + +// not thread safe +func (n *ExecutionNode) Start(ctx context.Context) error { + if n.started.Swap(true) { + return errors.New("already started") + } + // TODO after separation + // err := n.Stack.Start() + // if err != nil { + // return fmt.Errorf("error starting geth stack: %w", err) + // } + n.ExecEngine.Start(ctx) + err := n.TxPublisher.Start(ctx) + if err != nil { + return fmt.Errorf("error starting transaction publisher: %w", err) + } + if n.ParentChainReader != nil { + n.ParentChainReader.Start(ctx) + } + return nil +} + +func (n *ExecutionNode) StopAndWait() { + if !n.started.Load() { + return + } + // TODO after separation + // n.Stack.StopRPC() // does nothing if not running + if n.TxPublisher.Started() { + n.TxPublisher.StopAndWait() + } + n.Recorder.OrderlyShutdown() + if n.ParentChainReader != nil && n.ParentChainReader.Started() { + n.ParentChainReader.StopAndWait() + } + if n.ExecEngine.Started() { + n.ExecEngine.StopAndWait() + } + n.ArbInterface.BlockChain().Stop() // does nothing if not running + if err := n.Backend.Stop(); err != nil { + log.Error("backend stop", "err", err) + } + // TODO after separation + // if err := n.Stack.Close(); err != nil { + // log.Error("error on stack close", "err", err) + // } +} + +func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error { + return n.ExecEngine.DigestMessage(num, msg) +} +func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { + return n.ExecEngine.Reorg(count, newMessages, oldMessages) +} +func (n *ExecutionNode) HeadMessageNumber() (arbutil.MessageIndex, error) { + return n.ExecEngine.HeadMessageNumber() +} +func (n *ExecutionNode) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) { + return n.ExecEngine.HeadMessageNumberSync(t) +} +func (n *ExecutionNode) NextDelayedMessageNumber() (uint64, error) { + return n.ExecEngine.NextDelayedMessageNumber() +} +func (n *ExecutionNode) SequenceDelayedMessage(message *arbostypes.L1IncomingMessage, delayedSeqNum uint64) error { + return n.ExecEngine.SequenceDelayedMessage(message, delayedSeqNum) +} +func (n *ExecutionNode) ResultAtPos(pos arbutil.MessageIndex) (*execution.MessageResult, error) { + return n.ExecEngine.ResultAtPos(pos) +} + +func (n *ExecutionNode) RecordBlockCreation( + ctx context.Context, + pos
arbutil.MessageIndex, + msg *arbostypes.MessageWithMetadata, +) (*execution.RecordResult, error) { + return n.Recorder.RecordBlockCreation(ctx, pos, msg) +} +func (n *ExecutionNode) MarkValid(pos arbutil.MessageIndex, resultHash common.Hash) { + n.Recorder.MarkValid(pos, resultHash) +} +func (n *ExecutionNode) PrepareForRecord(ctx context.Context, start, end arbutil.MessageIndex) error { + return n.Recorder.PrepareForRecord(ctx, start, end) +} + +func (n *ExecutionNode) Pause() { + if n.Sequencer != nil { + n.Sequencer.Pause() + } +} +func (n *ExecutionNode) Activate() { + if n.Sequencer != nil { + n.Sequencer.Activate() + } +} +func (n *ExecutionNode) ForwardTo(url string) error { + if n.Sequencer != nil { + return n.Sequencer.ForwardTo(url) + } else { + return errors.New("forwardTo not supported - sequencer not active") + } +} +func (n *ExecutionNode) SetTransactionStreamer(streamer execution.TransactionStreamer) { + n.ExecEngine.SetTransactionStreamer(streamer) +} +func (n *ExecutionNode) MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 { + return n.ExecEngine.MessageIndexToBlockNumber(messageNum) +} + +func (n *ExecutionNode) Maintenance() error { + return n.ChainDB.Compact(nil, nil) +} diff --git a/arbnode/execution/sequencer.go b/execution/gethexec/sequencer.go similarity index 94% rename from arbnode/execution/sequencer.go rename to execution/gethexec/sequencer.go index 927ce7ac08..77442f65e4 100644 --- a/arbnode/execution/sequencer.go +++ b/execution/gethexec/sequencer.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -package execution +package gethexec import ( "context" @@ -16,6 +16,7 @@ import ( "time" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/headerreader" @@ -53,35 +54,18 @@ var ( ) type SequencerConfig struct { - Enable bool `koanf:"enable"` - MaxBlockSpeed time.Duration `koanf:"max-block-speed" reload:"hot"` - MaxRevertGasReject uint64 `koanf:"max-revert-gas-reject" reload:"hot"` - MaxAcceptableTimestampDelta time.Duration `koanf:"max-acceptable-timestamp-delta" reload:"hot"` - SenderWhitelist string `koanf:"sender-whitelist"` - Forwarder ForwarderConfig `koanf:"forwarder"` - QueueSize int `koanf:"queue-size"` - QueueTimeout time.Duration `koanf:"queue-timeout" reload:"hot"` - NonceCacheSize int `koanf:"nonce-cache-size" reload:"hot"` - MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` - NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` - NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` - Dangerous DangerousSequencerConfig `koanf:"dangerous"` -} - -type DangerousSequencerConfig struct { - NoCoordinator bool `koanf:"no-coordinator"` -} - -var DefaultDangerousSequencerConfig = DangerousSequencerConfig{ - NoCoordinator: false, -} - -var TestDangerousSequencerConfig = DangerousSequencerConfig{ - NoCoordinator: true, -} - -func DangerousSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Bool(prefix+".no-coordinator", DefaultDangerousSequencerConfig.NoCoordinator, "DANGEROUS! 
allows sequencer without coordinator.") + Enable bool `koanf:"enable"` + MaxBlockSpeed time.Duration `koanf:"max-block-speed" reload:"hot"` + MaxRevertGasReject uint64 `koanf:"max-revert-gas-reject" reload:"hot"` + MaxAcceptableTimestampDelta time.Duration `koanf:"max-acceptable-timestamp-delta" reload:"hot"` + SenderWhitelist string `koanf:"sender-whitelist"` + Forwarder ForwarderConfig `koanf:"forwarder"` + QueueSize int `koanf:"queue-size"` + QueueTimeout time.Duration `koanf:"queue-timeout" reload:"hot"` + NonceCacheSize int `koanf:"nonce-cache-size" reload:"hot"` + MaxTxDataSize int `koanf:"max-tx-data-size" reload:"hot"` + NonceFailureCacheSize int `koanf:"nonce-failure-cache-size" reload:"hot"` + NonceFailureCacheExpiry time.Duration `koanf:"nonce-failure-cache-expiry" reload:"hot"` } func (c *SequencerConfig) Validate() error { @@ -108,7 +92,6 @@ var DefaultSequencerConfig = SequencerConfig{ QueueSize: 1024, QueueTimeout: time.Second * 12, NonceCacheSize: 1024, - Dangerous: DefaultDangerousSequencerConfig, // 95% of the default batch poster limit, leaving 5KB for headers and such // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxTxDataSize: 95000, @@ -126,7 +109,6 @@ var TestSequencerConfig = SequencerConfig{ QueueSize: 128, QueueTimeout: time.Second * 5, NonceCacheSize: 4, - Dangerous: TestDangerousSequencerConfig, MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, @@ -145,7 +127,6 @@ func SequencerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".max-tx-data-size", DefaultSequencerConfig.MaxTxDataSize, "maximum transaction size the sequencer will accept") f.Int(prefix+".nonce-failure-cache-size", DefaultSequencerConfig.NonceFailureCacheSize, "number of transactions with too high of a nonce to keep in memory while waiting for their predecessor") f.Duration(prefix+".nonce-failure-cache-expiry", DefaultSequencerConfig.NonceFailureCacheExpiry, "maximum amount of time to wait for a predecessor before rejecting a tx with nonce too high") - DangerousSequencerConfigAddOptions(prefix+".dangerous", f) } type txQueueItem struct { @@ -373,8 +354,6 @@ func (s *Sequencer) onNonceFailureEvict(_ addressAndNonce, failure *nonceFailure } } -var ErrRetrySequencer = errors.New("please retry transaction") - // ctxWithTimeout is like context.WithTimeout except a timeout of 0 means unlimited instead of instantly expired. func ctxWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { if timeout == time.Duration(0) { @@ -878,7 +857,7 @@ func (s *Sequencer) createBlock(ctx context.Context) (returnValue bool) { if err == nil && len(hooks.TxErrors) != len(txes) { err = fmt.Errorf("unexpected number of error results: %v vs number of txes %v", len(hooks.TxErrors), len(txes)) } - if errors.Is(err, ErrRetrySequencer) { + if errors.Is(err, execution.ErrRetrySequencer) { log.Warn("error sequencing transactions", "err", err) // we changed roles // forward if we have where to diff --git a/arbnode/execution/tx_pre_checker.go b/execution/gethexec/tx_pre_checker.go similarity index 99% rename from arbnode/execution/tx_pre_checker.go rename to execution/gethexec/tx_pre_checker.go index 7d9e26d16c..51ba88fec8 100644 --- a/arbnode/execution/tx_pre_checker.go +++ b/execution/gethexec/tx_pre_checker.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package execution +package gethexec import ( "context" diff --git a/execution/interface.go b/execution/interface.go new file mode 100644 index 0000000000..ef9409b9c1 --- /dev/null +++ b/execution/interface.go @@ -0,0 +1,83 @@ +package execution + +import ( + "context" + "errors" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/validator" +) + +type MessageResult struct { + BlockHash common.Hash + SendRoot common.Hash +} + +type RecordResult struct { + Pos arbutil.MessageIndex + BlockHash common.Hash + Preimages map[common.Hash][]byte + BatchInfo []validator.BatchInfo +} + +var ErrRetrySequencer = errors.New("please retry transaction") +var ErrSequencerInsertLockTaken = errors.New("insert lock taken") + +// always needed +type ExecutionClient interface { + DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata) error + Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error + HeadMessageNumber() (arbutil.MessageIndex, error) + HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) + ResultAtPos(pos arbutil.MessageIndex) (*MessageResult, error) +} + +// needed for validators / stakers +type ExecutionRecorder interface { + RecordBlockCreation( + ctx context.Context, + pos arbutil.MessageIndex, + msg *arbostypes.MessageWithMetadata, + ) (*RecordResult, error) + MarkValid(pos arbutil.MessageIndex, resultHash common.Hash) + PrepareForRecord(ctx context.Context, start, end arbutil.MessageIndex) error +} + +// needed for sequencer +type ExecutionSequencer interface { + ExecutionClient + Pause() + Activate() + ForwardTo(url string) error + SequenceDelayedMessage(message *arbostypes.L1IncomingMessage, delayedSeqNum uint64) error + NextDelayedMessageNumber() (uint64, error) + SetTransactionStreamer(streamer TransactionStreamer) +} + +type FullExecutionClient interface { + ExecutionClient + ExecutionRecorder + ExecutionSequencer + + Start(ctx context.Context) error + StopAndWait() + + Maintenance() error + + // TODO: only used to get safe/finalized block numbers + MessageIndexToBlockNumber(messageNum arbutil.MessageIndex) uint64 +} + +// not implemented in execution, used as input +type BatchFetcher interface { + FetchBatch(batchNum uint64) ([]byte, error) +} + +type TransactionStreamer interface { + BatchFetcher + WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error + ExpectChosenSequencer() error +} diff --git a/go-ethereum b/go-ethereum index 921163a16b..b4221631e1 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 921163a16b537c08b1a383663ce2c4f3bd84a3a0 +Subproject commit b4221631e1e5eac86f01582bd74234e3c0f7f5c7 diff --git a/go.mod b/go.mod index 4bc28b950c..cdfae4df16 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multihash v0.2.1 + github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 @@ -237,6 +238,8 @@ require ( github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect + 
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect diff --git a/go.sum b/go.sum index db3935001a..db81b3a07e 100644 --- a/go.sum +++ b/go.sum @@ -1449,6 +1449,8 @@ github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= +github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= @@ -1599,6 +1601,10 @@ github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.9.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go index d795345839..6e2e5650c3 100644 --- a/nodeInterface/NodeInterface.go +++ b/nodeInterface/NodeInterface.go @@ -74,7 +74,10 @@ func (n NodeInterface) GetL1Confirmations(c ctx, evm mech, blockHash bytes32) (u if node.InboxReader == nil { return 0, nil } - bc := node.Execution.ArbInterface.BlockChain() + bc, err := blockchainFromNodeInterfaceBackend(n.backend) + if err != nil { + return 0, err + } header := bc.GetHeaderByHash(blockHash) if header == nil { return 0, errors.New("unknown block hash") @@ -482,16 +485,16 @@ func (n NodeInterface) GasEstimateL1Component( func (n NodeInterface) GasEstimateComponents( c ctx, evm mech, value huge, to addr, contractCreation bool, data []byte, ) (uint64, uint64, huge, huge, error) { - node, err := arbNodeFromNodeInterfaceBackend(n.backend) - if err != nil { - return 0, 0, nil, nil, err - } if to == types.NodeInterfaceAddress || to == types.NodeInterfaceDebugAddress { return 0, 0, nil, nil, errors.New("cannot estimate virtual contract") } + backend, ok := n.backend.(*arbitrum.APIBackend) + if !ok { + return 0, 0, nil, nil, errors.New("failed getting API backend") + } + 
context := n.context - backend := node.Execution.Backend.APIBackend() gasCap := backend.RPCGasCap() block := rpc.BlockNumberOrHashWithHash(n.header.Hash(), false) args := n.messageArgs(evm, value, to, contractCreation, data) diff --git a/nodeInterface/virtual-contracts.go b/nodeInterface/virtual-contracts.go index ec375699b7..b35381a77a 100644 --- a/nodeInterface/virtual-contracts.go +++ b/nodeInterface/virtual-contracts.go @@ -184,3 +184,15 @@ func arbNodeFromNodeInterfaceBackend(backend BackendAPI) (*arbnode.Node, error) } return arbNode, nil } + +func blockchainFromNodeInterfaceBackend(backend BackendAPI) (*core.BlockChain, error) { + apiBackend, ok := backend.(*arbitrum.APIBackend) + if !ok { + return nil, errors.New("API backend isn't Arbitrum") + } + bc := apiBackend.BlockChain() + if bc == nil { + return nil, errors.New("failed to get Blockchain from backend") + } + return bc, nil +} diff --git a/staker/block_validator.go b/staker/block_validator.go index 94bc2a0806..108d6d1d49 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -750,7 +750,7 @@ func (v *BlockValidator) iterativeValidationProgress(ctx context.Context, ignore } else if reorg != nil { err := v.Reorg(ctx, *reorg) if err != nil { - log.Error("error trying to rorg validation", "pos", *reorg-1, "err", err) + log.Error("error trying to reorg validation", "pos", *reorg-1, "err", err) v.possiblyFatal(err) } } diff --git a/staker/staker.go b/staker/staker.go index d52d1adc77..4148d0a204 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -203,7 +203,7 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") - dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) + dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f, dataposter.DefaultDataPosterConfigForValidator) redislock.AddConfigOptions(prefix+".redis-lock", f) DangerousConfigAddOptions(prefix+".dangerous", f) genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index e290ffad67..c4968ca9e4 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -11,7 +11,7 @@ import ( "sync" "testing" - "github.com/offchainlabs/nitro/arbnode/execution" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/validator/server_api" @@ -33,7 +33,7 @@ type StatelessBlockValidator struct { execSpawner validator.ExecutionSpawner validationSpawners []validator.ValidationSpawner - recorder BlockRecorder + recorder execution.ExecutionRecorder inboxReader InboxReaderInterface inboxTracker InboxTrackerInterface @@ -50,16 +50,6 @@ type BlockValidatorRegistrer interface { SetBlockValidator(*BlockValidator) } -type BlockRecorder interface { - RecordBlockCreation( - ctx context.Context, - pos arbutil.MessageIndex, - msg *arbostypes.MessageWithMetadata, - ) (*execution.RecordResult, error) - MarkValid(pos arbutil.MessageIndex, resultHash common.Hash) - PrepareForRecord(ctx context.Context, start, end arbutil.MessageIndex) 
error -} - type InboxTrackerInterface interface { BlockValidatorRegistrer GetDelayedMessageBytes(uint64) ([]byte, error) @@ -234,7 +224,7 @@ func NewStatelessBlockValidator( inboxReader InboxReaderInterface, inbox InboxTrackerInterface, streamer TransactionStreamerInterface, - recorder BlockRecorder, + recorder execution.ExecutionRecorder, arbdb ethdb.Database, das arbstate.DataAvailabilityReader, config func() *BlockValidatorConfig, @@ -432,7 +422,7 @@ func (v *StatelessBlockValidator) ValidateResult( return true, &entry.End, nil } -func (v *StatelessBlockValidator) OverrideRecorder(t *testing.T, recorder BlockRecorder) { +func (v *StatelessBlockValidator) OverrideRecorder(t *testing.T, recorder execution.ExecutionRecorder) { v.recorder = recorder } diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 3ade358cee..302e4fb439 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -177,7 +177,7 @@ func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value) + return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) } func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error { @@ -288,7 +288,7 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value) + arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value, nil) if err != nil { return nil, err } @@ -338,7 +338,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) ( if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value) + return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) } // gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate. 
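Every validator-wallet PostTransaction call in this diff (including the EOA variant just below) now passes a trailing nil: the data poster gained an optional access-list argument, which the batch poster fills with a computed types.AccessList while validator transactions leave it empty. The value being threaded through is go-ethereum's EIP-2930 access list; a minimal, self-contained sketch of what such a value looks like, using a placeholder address and storage slot rather than this repo's contracts:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// An access list pre-declares the addresses and storage slots a
	// transaction intends to touch; passing nil, as the validator-wallet
	// call sites in this diff do, simply posts the transaction without one.
	al := types.AccessList{
		{
			Address:     common.HexToAddress("0x0000000000000000000000000000000000000001"), // placeholder
			StorageKeys: []common.Hash{common.HexToHash("0x01")},                           // placeholder slot
		},
	}
	fmt.Println("tuples:", len(al), "total storage keys:", al.StorageKeys())
}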
diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index b2c9f68b56..d86181f42f 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -95,7 +95,7 @@ func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (* return nil, err } gas := baseTx.Gas() + w.getExtraGas() - newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) + newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value(), nil) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } diff --git a/system_tests/arbtrace_test.go b/system_tests/arbtrace_test.go index 78907aa622..a4995e3979 100644 --- a/system_tests/arbtrace_test.go +++ b/system_tests/arbtrace_test.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -144,10 +144,10 @@ func TestArbTraceForwarding(t *testing.T) { defer srv.Stop() defer listener.Close() - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.RPC.ClassicRedirect = ipcPath - nodeConfig.RPC.ClassicRedirectTimeout = time.Second - _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nodeConfig, nil, nil, nil) + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.ClassicRedirect = ipcPath + execConfig.RPC.ClassicRedirectTimeout = time.Second + _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, execConfig, nil, nil, nil) defer requireClose(t, l1stack) defer requireClose(t, l2stack) diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 8b0811c223..4ea2a16c07 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -48,7 +49,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { conf := arbnode.ConfigDefaultL1Test() conf.BatchPoster.Enable = false conf.BatchPoster.RedisUrl = redisUrl - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil) + l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() @@ -82,7 +83,19 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { for i := 0; i < parallelBatchPosters; i++ { // Make a copy of the batch poster config so NewBatchPoster calling Validate() on it doesn't race batchPosterConfig := conf.BatchPoster - batchPoster, err := arbnode.NewBatchPoster(ctx, nil, nodeA.L1Reader, nodeA.InboxTracker, nodeA.TxStreamer, nodeA.SyncMonitor, func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, nodeA.DeployInfo, &seqTxOpts, nil) + batchPoster, err := arbnode.NewBatchPoster(ctx, + &arbnode.BatchPosterOpts{ + DataPosterDB: nil, + L1Reader: nodeA.L1Reader, + Inbox: nodeA.InboxTracker, + Streamer: nodeA.TxStreamer, + SyncMonitor: nodeA.SyncMonitor, + Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, + DeployInfo: nodeA.DeployInfo, + 
TransactOpts: &seqTxOpts, + DAWriter: nil, + }, + ) Require(t, err) batchPoster.Start(ctx) defer batchPoster.StopAndWait() @@ -103,6 +116,8 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } } + // TODO: factor this out in separate test case and skip it or delete this + // code entirely. // I've locally confirmed that this passes when the clique period is set to 1. // However, setting the clique period to 1 slows everything else (including the L1 deployment for this test) down to a crawl. if false { @@ -142,9 +157,9 @@ func TestBatchPosterLargeTx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf := arbnode.ConfigDefaultL1Test() + conf := gethexec.ConfigDefaultTest() conf.Sequencer.MaxTxDataSize = 110000 - l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil) + l2info, nodeA, l2clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, conf, nil, nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() @@ -176,8 +191,9 @@ func TestBatchPosterKeepsUp(t *testing.T) { conf := arbnode.ConfigDefaultL1Test() conf.BatchPoster.CompressionLevel = brotli.BestCompression conf.BatchPoster.MaxDelay = time.Hour - conf.RPC.RPCTxFeeCap = 1000. - l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil) + execConf := gethexec.ConfigDefaultTest() + execConf.RPC.RPCTxFeeCap = 1000. + l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, execConf, nil, nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() l2info.GasPrice = big.NewInt(100e9) diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index fa3d902b18..1699346b17 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -21,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" ) @@ -50,7 +51,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops delayEvery = workloadLoops / 3 } - l2info, nodeA, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, chainConfig, nil) + l2info, nodeA, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() @@ -61,7 +62,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability validatorConfig.DataAvailability.RPCAggregator.Enable = false AddDefaultValNode(t, ctx, validatorConfig, !arbitrator) - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, validatorConfig, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, validatorConfig, nil, nil) defer nodeB.StopAndWait() l2info.GenerateAccount("User2") @@ -190,8 +191,12 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops if !nodeB.BlockValidator.WaitForPos(t, ctx, arbutil.MessageIndex(lastBlock.NumberU64()), timeout) { Fatal(t, "did not validate all blocks") } - nodeB.Execution.Recorder.TrimAllPrepared(t) - finalRefCount := 
nodeB.Execution.Recorder.RecordingDBReferenceCount() + gethExec, ok := nodeB.Execution.(*gethexec.ExecutionNode) + if !ok { + t.Fail() + } + gethExec.Recorder.TrimAllPrepared(t) + finalRefCount := gethExec.Recorder.RecordingDBReferenceCount() lastBlockNow, err := l2clientB.BlockByNumber(ctx, nil) Require(t, err) // up to 3 extra references: awaiting validation, recently valid, lastValidatedHeader diff --git a/system_tests/bloom_test.go b/system_tests/bloom_test.go index 9ad3253d4a..14c42f6a2f 100644 --- a/system_tests/bloom_test.go +++ b/system_tests/bloom_test.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) @@ -25,10 +25,10 @@ func TestBloom(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeconfig := arbnode.ConfigDefaultL2Test() - nodeconfig.RPC.BloomBitsBlocks = 256 - nodeconfig.RPC.BloomConfirms = 1 - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nodeconfig, false) + execconfig := gethexec.ConfigDefaultTest() + execconfig.RPC.BloomBitsBlocks = 256 + execconfig.RPC.BloomConfirms = 1 + l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, execconfig, false) defer node.StopAndWait() l2info.GenerateAccount("User2") @@ -80,9 +80,9 @@ func TestBloom(t *testing.T) { t.Log("counts: ", i, "/", countsNum) } } - + execNode := getExecNode(t, node) for { - sectionSize, sectionNum := node.Execution.Backend.APIBackend().BloomStatus() + sectionSize, sectionNum := execNode.Backend.APIBackend().BloomStatus() if sectionSize != 256 { Fatal(t, "unexpected section size: ", sectionSize) } diff --git a/system_tests/common_test.go b/system_tests/common_test.go index ba666175b6..b3965c5c85 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -16,7 +16,6 @@ import ( "testing" "time" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbstate" @@ -24,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" @@ -500,13 +500,13 @@ func DeployOnTestL1( } func createL2BlockChain( - t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, cacheConfig *execution.CachingConfig, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, cacheConfig *gethexec.CachingConfig, ) (*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { return createL2BlockChainWithStackConfig(t, l2info, dataDir, chainConfig, nil, nil, cacheConfig) } func createL2BlockChainWithStackConfig( - t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, cacheConfig *execution.CachingConfig, + t *testing.T, l2info *BlockchainTestInfo, dataDir string, chainConfig *params.ChainConfig, initMessage *arbostypes.ParsedInitMessage, stackConfig *node.Config, cacheConfig *gethexec.CachingConfig, ) 
(*BlockchainTestInfo, *node.Node, ethdb.Database, ethdb.Database, *core.BlockChain) { if l2info == nil { l2info = NewArbTestInfo(t, chainConfig.ChainID) @@ -539,9 +539,9 @@ func createL2BlockChainWithStackConfig( } var coreCacheConfig *core.CacheConfig if cacheConfig != nil { - coreCacheConfig = execution.DefaultCacheConfigFor(stack, cacheConfig) + coreCacheConfig = gethexec.DefaultCacheConfigFor(stack, cacheConfig) } - blockchain, err := execution.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) + blockchain, err := gethexec.WriteOrTestBlockChain(chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0) Require(t, err) return l2info, stack, chainDb, arbDb, blockchain @@ -562,7 +562,7 @@ func createTestNodeOnL1( l2info info, node *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, ) { - return createTestNodeOnL1WithConfig(t, ctx, isSequencer, nil, nil, nil) + return createTestNodeOnL1WithConfig(t, ctx, isSequencer, nil, nil, nil, nil) } func createTestNodeOnL1WithConfig( @@ -570,13 +570,14 @@ func createTestNodeOnL1WithConfig( ctx context.Context, isSequencer bool, nodeConfig *arbnode.Config, + execConfig *gethexec.Config, chainConfig *params.ChainConfig, stackConfig *node.Config, ) ( l2info info, currentNode *arbnode.Node, l2client *ethclient.Client, l1info info, l1backend *eth.Ethereum, l1client *ethclient.Client, l1stack *node.Node, ) { - l2info, currentNode, l2client, _, l1info, l1backend, l1client, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, isSequencer, nodeConfig, chainConfig, stackConfig, nil) + l2info, currentNode, l2client, _, l1info, l1backend, l1client, l1stack = createTestNodeOnL1WithConfigImpl(t, ctx, isSequencer, nodeConfig, execConfig, chainConfig, stackConfig, nil) return } @@ -585,6 +586,7 @@ func createTestNodeOnL1WithConfigImpl( ctx context.Context, isSequencer bool, nodeConfig *arbnode.Config, + execConfig *gethexec.Config, chainConfig *params.ChainConfig, stackConfig *node.Config, l2info_in info, @@ -595,6 +597,9 @@ func createTestNodeOnL1WithConfigImpl( if nodeConfig == nil { nodeConfig = arbnode.ConfigDefaultL1Test() } + if execConfig == nil { + execConfig = gethexec.ConfigDefaultTest() + } if chainConfig == nil { chainConfig = params.ArbitrumDevTestChainConfig() } @@ -608,7 +613,7 @@ func createTestNodeOnL1WithConfigImpl( l2info = NewArbTestInfo(t, chainConfig.ChainID) } addresses, initMessage := DeployOnTestL1(t, ctx, l1info, l1client, chainConfig) - _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &nodeConfig.Caching) + _, l2stack, l2chainDb, l2arbDb, l2blockchain = createL2BlockChainWithStackConfig(t, l2info, "", chainConfig, initMessage, stackConfig, &execConfig.Caching) var sequencerTxOptsPtr *bind.TransactOpts var dataSigner signature.DataSignerFunc if isSequencer { @@ -619,15 +624,20 @@ func createTestNodeOnL1WithConfigImpl( if !isSequencer { nodeConfig.BatchPoster.Enable = false - nodeConfig.Sequencer.Enable = false + nodeConfig.Sequencer = false nodeConfig.DelayedSequencer.Enable = false + execConfig.Sequencer.Enable = false } AddDefaultValNode(t, ctx, nodeConfig, true) - var err error + Require(t, execConfig.Validate()) + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, l2stack, 
l2chainDb, l2blockchain, l1client, execConfigFetcher) + Require(t, err) + currentNode, err = arbnode.CreateNode( - ctx, l2stack, l2chainDb, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain, l1client, + ctx, l2stack, execNode, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, fatalErrChan, ) Require(t, err) @@ -644,18 +654,31 @@ func createTestNodeOnL1WithConfigImpl( // L2 -Only. Enough for tests that needs no interface to L1 // Requires precompiles.AllowDebugPrecompiles = true func CreateTestL2(t *testing.T, ctx context.Context) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { - return CreateTestL2WithConfig(t, ctx, nil, arbnode.ConfigDefaultL2Test(), true) + return CreateTestL2WithConfig(t, ctx, nil, nil, nil, true) } func CreateTestL2WithConfig( - t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, takeOwnership bool, + t *testing.T, ctx context.Context, l2Info *BlockchainTestInfo, nodeConfig *arbnode.Config, execConfig *gethexec.Config, takeOwnership bool, ) (*BlockchainTestInfo, *arbnode.Node, *ethclient.Client) { + if nodeConfig == nil { + nodeConfig = arbnode.ConfigDefaultL2Test() + } + if execConfig == nil { + execConfig = gethexec.ConfigDefaultTest() + } + feedErrChan := make(chan error, 10) AddDefaultValNode(t, ctx, nodeConfig, true) - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", params.ArbitrumDevTestChainConfig(), &nodeConfig.Caching) - currentNode, err := arbnode.CreateNode(ctx, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, nil, nil, nil, nil, feedErrChan) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, l2Info, "", params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + Require(t, execConfig.Validate()) + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + currentNode, err := arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(nodeConfig), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan) Require(t, err) // Give the node an init message @@ -723,7 +746,7 @@ func Create2ndNode( } else { nodeConf.DataAvailability = *dasConfig } - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConf, nil) + return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConf, nil, nil) } func Create2ndNodeWithConfig( @@ -734,8 +757,15 @@ func Create2ndNodeWithConfig( l1info *BlockchainTestInfo, l2InitData *statetransfer.ArbosInitializationInfo, nodeConfig *arbnode.Config, + execConfig *gethexec.Config, stackConfig *node.Config, ) (*ethclient.Client, *arbnode.Node) { + if nodeConfig == nil { + nodeConfig = arbnode.ConfigDefaultL1NonSequencerTest() + } + if execConfig == nil { + execConfig = gethexec.ConfigDefaultNonSequencerTest() + } feedErrChan := make(chan error, 10) l1rpcClient, err := l1stack.Attach() if err != nil { @@ -757,16 +787,23 @@ func Create2ndNodeWithConfig( dataSigner := signature.DataSignerFromPrivateKey(l1info.GetInfoWithPrivKey("Sequencer").PrivateKey) txOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) - chainConfig := first.Execution.ArbInterface.BlockChain().Config() + firstExec := getExecNode(t, first) + + chainConfig := firstExec.ArbInterface.BlockChain().Config() initMessage := getInitMessage(ctx, t, l1client, first.DeployInfo) - 
coreCacheConfig := execution.DefaultCacheConfigFor(l2stack, &nodeConfig.Caching) - l2blockchain, err := execution.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, arbnode.ConfigDefaultL2Test().TxLookupLimit, 0) + coreCacheConfig := gethexec.DefaultCacheConfigFor(l2stack, &execConfig.Caching) + l2blockchain, err := gethexec.WriteOrTestBlockChain(l2chainDb, coreCacheConfig, initReader, chainConfig, initMessage, gethexec.ConfigDefaultTest().TxLookupLimit, 0) Require(t, err) AddDefaultValNode(t, ctx, nodeConfig, true) - currentNode, err := arbnode.CreateNode(ctx, l2stack, l2chainDb, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain, l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan) + Require(t, execConfig.Validate()) + configFetcher := func() *gethexec.Config { return execConfig } + currentExec, err := gethexec.CreateExecutionNode(ctx, l2stack, l2chainDb, l2blockchain, l1client, configFetcher) + Require(t, err) + + currentNode, err := arbnode.CreateNode(ctx, l2stack, currentExec, l2arbDb, NewFetcherFromConfig(nodeConfig), l2blockchain.Config(), l1client, first.DeployInfo, &txOpts, &txOpts, dataSigner, feedErrChan) Require(t, err) err = currentNode.Start(ctx) @@ -935,3 +972,12 @@ func TestMain(m *testing.M) { code := m.Run() os.Exit(code) } + +func getExecNode(t *testing.T, node *arbnode.Node) *gethexec.ExecutionNode { + t.Helper() + gethExec, ok := node.Execution.(*gethexec.ExecutionNode) + if !ok { + t.Fatal("failed to get exec node from arbnode") + } + return gethExec +} diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go index 14aa000313..211908a883 100644 --- a/system_tests/conditionaltx_test.go +++ b/system_tests/conditionaltx_test.go @@ -20,23 +20,22 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" ) -func getStorageRootHash(t *testing.T, node *arbnode.Node, address common.Address) common.Hash { +func getStorageRootHash(t *testing.T, execNode *gethexec.ExecutionNode, address common.Address) common.Hash { t.Helper() - statedb, err := node.Execution.Backend.ArbInterface().BlockChain().State() + statedb, err := execNode.Backend.ArbInterface().BlockChain().State() Require(t, err) trie, err := statedb.StorageTrie(address) Require(t, err) return trie.Hash() } -func getStorageSlotValue(t *testing.T, node *arbnode.Node, address common.Address) map[common.Hash]common.Hash { +func getStorageSlotValue(t *testing.T, execNode *gethexec.ExecutionNode, address common.Address) map[common.Hash]common.Hash { t.Helper() - statedb, err := node.Execution.Backend.ArbInterface().BlockChain().State() + statedb, err := execNode.Backend.ArbInterface().BlockChain().State() Require(t, err) slotValue := make(map[common.Hash]common.Hash) Require(t, err) @@ -207,6 +206,7 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { defer requireClose(t, l1stack) defer node.StopAndWait() + execNode := getExecNode(t, node) auth := l2info.GetDefaultTransactOpts("Owner", ctx) contractAddress1, simple1 := deploySimple(t, ctx, auth, l2client) tx, err := simple1.Increment(&auth) @@ -223,10 +223,10 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { _, err = EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - currentRootHash1 := 
getStorageRootHash(t, node, contractAddress1) - currentSlotValueMap1 := getStorageSlotValue(t, node, contractAddress1) - currentRootHash2 := getStorageRootHash(t, node, contractAddress2) - currentSlotValueMap2 := getStorageSlotValue(t, node, contractAddress2) + currentRootHash1 := getStorageRootHash(t, execNode, contractAddress1) + currentSlotValueMap1 := getStorageSlotValue(t, execNode, contractAddress1) + currentRootHash2 := getStorageRootHash(t, execNode, contractAddress2) + currentSlotValueMap2 := getStorageSlotValue(t, execNode, contractAddress2) rpcClient, err := node.Stack.Attach() Require(t, err) @@ -262,18 +262,18 @@ func TestSendRawTransactionConditionalBasic(t *testing.T) { Require(t, err) previousStorageRootHash1 := currentRootHash1 - currentRootHash1 = getStorageRootHash(t, node, contractAddress1) + currentRootHash1 = getStorageRootHash(t, execNode, contractAddress1) if bytes.Equal(previousStorageRootHash1.Bytes(), currentRootHash1.Bytes()) { Fatal(t, "storage root hash didn't change as expected") } - currentSlotValueMap1 = getStorageSlotValue(t, node, contractAddress1) + currentSlotValueMap1 = getStorageSlotValue(t, execNode, contractAddress1) previousStorageRootHash2 := currentRootHash2 - currentRootHash2 = getStorageRootHash(t, node, contractAddress2) + currentRootHash2 = getStorageRootHash(t, execNode, contractAddress2) if bytes.Equal(previousStorageRootHash2.Bytes(), currentRootHash2.Bytes()) { Fatal(t, "storage root hash didn't change as expected") } - currentSlotValueMap2 = getStorageSlotValue(t, node, contractAddress2) + currentSlotValueMap2 = getStorageSlotValue(t, execNode, contractAddress2) block, err = l1client.BlockByNumber(ctx, nil) Require(t, err) @@ -367,7 +367,8 @@ func TestSendRawTransactionConditionalMultiRoutine(t *testing.T) { } cancelCtxWithTimeout() wg.Wait() - bc := node.Execution.Backend.ArbInterface().BlockChain() + execNode := getExecNode(t, node) + bc := execNode.Backend.ArbInterface().BlockChain() genesis := bc.Config().ArbitrumChainParams.GenesisBlockNum var receipts types.Receipts @@ -403,17 +404,18 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.TxPreChecker.Strictness = execution.TxPreCheckerStrictnessLikelyCompatible - nodeConfig.TxPreChecker.RequiredStateAge = 1 - nodeConfig.TxPreChecker.RequiredStateMaxBlocks = 2 + execConfig := gethexec.ConfigDefaultTest() + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible + execConfig.TxPreChecker.RequiredStateAge = 1 + execConfig.TxPreChecker.RequiredStateMaxBlocks = 2 - l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, nil) + l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil) defer requireClose(t, l1stack) defer node.StopAndWait() rpcClient, err := node.Stack.Attach() Require(t, err) + execNode := getExecNode(t, node) l2info.GenerateAccount("User2") @@ -428,7 +430,7 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) { Require(t, err, "failed to call Increment()") _, err = EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - currentRootHash := getStorageRootHash(t, node, contractAddress) + currentRootHash := getStorageRootHash(t, execNode, contractAddress) options := &arbitrum_types.ConditionalOptions{ 
KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{ contractAddress: {RootHash: &currentRootHash}, @@ -447,7 +449,7 @@ func TestSendRawTransactionConditionalPreCheck(t *testing.T) { Require(t, err, "failed to call Increment()") _, err = EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) - currentRootHash = getStorageRootHash(t, node, contractAddress) + currentRootHash = getStorageRootHash(t, execNode, contractAddress) options = &arbitrum_types.ConditionalOptions{ KnownAccounts: map[common.Address]arbitrum_types.RootHashOrSlots{ contractAddress: {RootHash: &currentRootHash}, diff --git a/system_tests/contract_tx_test.go b/system_tests/contract_tx_test.go index e671dcc6ac..d6c2eb5f38 100644 --- a/system_tests/contract_tx_test.go +++ b/system_tests/contract_tx_test.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/util/arbmath" @@ -26,8 +25,7 @@ func TestContractTxDeploy(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeconfig := arbnode.ConfigDefaultL2Test() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nodeconfig, false) + l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, nil, false) defer node.StopAndWait() from := common.HexToAddress("0x123412341234") diff --git a/system_tests/das_test.go b/system_tests/das_test.go index 8889d2d53d..8c1588273b 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -22,11 +22,11 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" @@ -138,8 +138,10 @@ func TestDASRekey(t *testing.T) { l1NodeConfigA.DataAvailability.RestAggregator.Enable = true l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" + execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) + Require(t, err) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -152,7 +154,7 @@ func TestDASRekey(t *testing.T) { l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info,
big.NewInt(1e12), l2clientB) nodeA.StopAndWait() nodeB.StopAndWait() @@ -178,15 +180,19 @@ func TestDASRekey(t *testing.T) { l2arbDb, err := l2stackA.OpenDatabase("arbdb", 0, 0, "", false) Require(t, err) - l2blockchain, err := execution.GetBlockChain(l2chainDb, nil, chainConfig, arbnode.ConfigDefaultL2Test().TxLookupLimit) + l2blockchain, err := gethexec.GetBlockChain(l2chainDb, nil, chainConfig, gethexec.ConfigDefaultTest().TxLookupLimit) + Require(t, err) + + execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) Require(t, err) + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) - nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(2e12), l2clientB) nodeA.StopAndWait() @@ -309,9 +315,12 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l2info, l2stackA, l2chainDb, l2arbDb, l2blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) l2info.GenerateAccount("User2") + execA, err := gethexec.CreateExecutionNode(ctx, l2stackA, l2chainDb, l2blockchain, l1client, gethexec.ConfigDefaultTest) + Require(t, err) + sequencerTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) sequencerTxOptsPtr := &sequencerTxOpts - nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan) + nodeA, err := arbnode.CreateNode(ctx, l2stackA, execA, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain.Config(), l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, dataSigner, feedErrChan) Require(t, err) Require(t, nodeA.Start(ctx)) l2clientA := ClientForStack(t, l2stackA) @@ -333,7 +342,7 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { l1NodeConfigB.DataAvailability.RestAggregator.Enable = true l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index 03e3dfd405..ff28e2350c 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -14,7 +14,7 @@ import ( func TestDebugAPI(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, 
ctx, true, nil, nil, nil, nil) + _, _, _, l2stack, _, _, _, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, nil, nil, nil) defer requireClose(t, l1stack) defer requireClose(t, l2stack) diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go index 26b5a78145..9f2db62dab 100644 --- a/system_tests/estimation_test.go +++ b/system_tests/estimation_test.go @@ -185,8 +185,9 @@ func TestComponentEstimate(t *testing.T) { baseFee, _ := outputs[2].(*big.Int) l1BaseFeeEstimate, _ := outputs[3].(*big.Int) + execNode := getExecNode(t, node) tx := l2info.SignTxAs("User", &types.DynamicFeeTx{ - ChainID: node.Execution.ArbInterface.BlockChain().Config().ChainID, + ChainID: execNode.ArbInterface.BlockChain().Config().ChainID, Nonce: 0, GasTipCap: maxPriorityFeePerGas, GasFeeCap: maxFeePerGas, diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index ea7edc2ee8..750293622d 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -37,7 +37,8 @@ func TestSequencerFeePaid(t *testing.T) { defer requireClose(t, l1stack) defer l2node.StopAndWait() - version := l2node.Execution.ArbInterface.BlockChain().Config().ArbitrumChainParams.InitialArbOSVersion + execNode := getExecNode(t, l2node) + version := execNode.ArbInterface.BlockChain().Config().ArbitrumChainParams.InitialArbOSVersion callOpts := l2info.GetDefaultCallOpts("Owner", ctx) // get the network fee account @@ -135,7 +136,7 @@ func testSequencerPriceAdjustsFrom(t *testing.T, initialEstimate uint64) { conf := arbnode.ConfigDefaultL1Test() conf.DelayedSequencer.FinalizeDistance = 1 - l2info, node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, chainConfig, nil) + l2info, node, l2client, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, chainConfig, nil) defer requireClose(t, l1stack) defer node.StopAndWait() diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 0a954719d8..2e0544cc26 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -19,8 +19,8 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/node" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -40,18 +40,20 @@ func TestStaticForwarder(t *testing.T) { nodeConfigA := arbnode.ConfigDefaultL1Test() nodeConfigA.BatchPoster.Enable = false - l2info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, stackConfig) + l2info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, nil, stackConfig) defer requireClose(t, l1stack) defer nodeA.StopAndWait() nodeConfigB := arbnode.ConfigDefaultL1Test() - nodeConfigB.Sequencer.Enable = false + execConfigB := gethexec.ConfigDefaultTest() + execConfigB.Sequencer.Enable = false + nodeConfigB.Sequencer = false nodeConfigB.DelayedSequencer.Enable = false - nodeConfigB.Forwarder.RedisUrl = "" - nodeConfigB.ForwardingTarget = ipcPath + execConfigB.Forwarder.RedisUrl = "" + execConfigB.ForwardingTarget = ipcPath nodeConfigB.BatchPoster.Enable = false - clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nodeConfigB, nil) + clientB, nodeB := 
Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nodeConfigB, execConfigB, nil) defer nodeB.StopAndWait() l2info.GenerateAccount("User2") @@ -105,7 +107,7 @@ func fallbackSequencer( nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl nodeConfig.SeqCoordinator.MyUrl = opts.ipcPath - return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, stackConfig) + return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, nil, stackConfig) } func createForwardingNode( @@ -125,14 +127,16 @@ func createForwardingNode( ipcConfig.Apply(stackConfig) } nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.Sequencer.Enable = false + nodeConfig.Sequencer = false nodeConfig.DelayedSequencer.Enable = false nodeConfig.BatchPoster.Enable = false - nodeConfig.Forwarder.RedisUrl = redisUrl - nodeConfig.ForwardingTarget = fallbackPath + execConfig := gethexec.ConfigDefaultTest() + execConfig.Sequencer.Enable = false + execConfig.Forwarder.RedisUrl = redisUrl + execConfig.ForwardingTarget = fallbackPath // nodeConfig.Feed.Output.Enable = false - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) + return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, execConfig, stackConfig) } func createSequencer( @@ -154,7 +158,7 @@ func createSequencer( nodeConfig.SeqCoordinator.RedisUrl = redisUrl nodeConfig.SeqCoordinator.MyUrl = ipcPath - return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) + return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, gethexec.ConfigDefaultTest(), stackConfig) } // tmpPath returns file path with specified filename from temporary directory of the test. 
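The forwarder_test.go changes above illustrate the configuration split this PR introduces: sequencing is disabled twice, once on the consensus config (arbnode.Config.Sequencer, now a plain bool) and once on the execution config (gethexec.Config.Sequencer.Enable), while the forwarding knobs (ForwardingTarget, Forwarder.RedisUrl) now live on the execution side. A sketch of the pattern using only field names that appear in these hunks; the helper itself is illustrative, not part of the PR:

package forwardertest

import (
	"github.com/offchainlabs/nitro/arbnode"
	"github.com/offchainlabs/nitro/execution/gethexec"
)

// nonSequencingForwarderConfigs mirrors createForwardingNode's setup:
// consensus-side sequencing, delayed sequencing, and batch posting are
// all switched off, while the execution node forwards transactions to a
// fallback sequencer (optionally discovered through Redis).
func nonSequencingForwarderConfigs(redisUrl, fallbackPath string) (*arbnode.Config, *gethexec.Config) {
	nodeConfig := arbnode.ConfigDefaultL1Test()
	nodeConfig.Sequencer = false // the consensus flag is now a bool
	nodeConfig.DelayedSequencer.Enable = false
	nodeConfig.BatchPoster.Enable = false

	execConfig := gethexec.ConfigDefaultTest()
	execConfig.Sequencer.Enable = false // execution-side sequencer off too
	execConfig.Forwarder.RedisUrl = redisUrl
	execConfig.ForwardingTarget = fallbackPath
	return nodeConfig, execConfig
}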
@@ -289,7 +293,7 @@ func TestRedisForwarder(t *testing.T) { tx := l2info.PrepareTx(userA, userB, l2info.TransferGas, transferAmount, nil) sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } - if err := tryWithTimeout(ctx, sendFunc, execution.DefaultTestForwarderConfig.UpdateInterval*10); err != nil { + if err := tryWithTimeout(ctx, sendFunc, gethexec.DefaultTestForwarderConfig.UpdateInterval*10); err != nil { t.Fatalf("Client: %v, error sending transaction: %v", i, err) } _, err := EnsureTxSucceeded(ctx, seqClients[i], tx) @@ -332,7 +336,7 @@ func TestRedisForwarderFallbackNoRedis(t *testing.T) { l2info.GenerateAccount(user) tx := l2info.PrepareTx("Owner", "User2", l2info.TransferGas, transferAmount, nil) sendFunc := func() error { return forwardingClient.SendTransaction(ctx, tx) } - err := tryWithTimeout(ctx, sendFunc, execution.DefaultTestForwarderConfig.UpdateInterval*10) + err := tryWithTimeout(ctx, sendFunc, gethexec.DefaultTestForwarderConfig.UpdateInterval*10) Require(t, err) _, err = EnsureTxSucceeded(ctx, fallbackClient, tx) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index b64a655c3e..99064d1913 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -31,6 +31,7 @@ import ( "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" @@ -270,7 +271,9 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) asserterRollupAddresses.Bridge = asserterBridgeAddr asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr - asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, NewFetcherFromConfig(conf), asserterL2Blockchain, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan) + asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) + Require(t, err) + asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan) Require(t, err) err = asserterL2.Start(ctx) Require(t, err) @@ -279,7 +282,9 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall challengerRollupAddresses := *asserterRollupAddresses challengerRollupAddresses.Bridge = challengerBridgeAddr challengerRollupAddresses.SequencerInbox = challengerSeqInboxAddr - challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, NewFetcherFromConfig(conf), challengerL2Blockchain, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan) + challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) + Require(t, err) + challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, 
nil, nil, nil, fatalErrChan) Require(t, err) err = challengerL2.Start(ctx) Require(t, err) @@ -327,13 +332,13 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall } } - asserterGenesis := asserterL2.Execution.ArbInterface.BlockChain().Genesis() - challengerGenesis := challengerL2.Execution.ArbInterface.BlockChain().Genesis() + asserterGenesis := asserterExec.ArbInterface.BlockChain().Genesis() + challengerGenesis := challengerExec.ArbInterface.BlockChain().Genesis() if asserterGenesis.Hash() != challengerGenesis.Hash() { Fatal(t, "asserter and challenger have different genesis hashes") } - asserterLatestBlock := asserterL2.Execution.ArbInterface.BlockChain().CurrentBlock() - challengerLatestBlock := challengerL2.Execution.ArbInterface.BlockChain().CurrentBlock() + asserterLatestBlock := asserterExec.ArbInterface.BlockChain().CurrentBlock() + challengerLatestBlock := challengerExec.ArbInterface.BlockChain().CurrentBlock() if asserterLatestBlock.Hash() == challengerLatestBlock.Hash() { Fatal(t, "asserter and challenger have the same end block") } @@ -368,7 +373,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterL2.Execution.Recorder, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -385,7 +390,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerL2.Execution.Recorder, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } diff --git a/system_tests/infra_fee_test.go b/system_tests/infra_fee_test.go index 89f869576d..a56e054563 100644 --- a/system_tests/infra_fee_test.go +++ b/system_tests/infra_fee_test.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/arbmath" @@ -23,9 +22,8 @@ func TestInfraFee(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeconfig := arbnode.ConfigDefaultL2Test() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nodeconfig, true) + l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, nil, true) defer node.StopAndWait() l2info.GenerateAccount("User2") diff --git a/system_tests/initialization_test.go b/system_tests/initialization_test.go index c7797d35e6..0e055adc5f 100644 --- a/system_tests/initialization_test.go +++ b/system_tests/initialization_test.go @@ -11,7 +11,6 @@ import ( 
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/statetransfer" "github.com/offchainlabs/nitro/util/testhelpers" ) @@ -63,7 +62,7 @@ func TestInitContract(t *testing.T) { l2info.ArbInitData.Accounts = append(l2info.ArbInitData.Accounts, accountInfo) expectedSums[accountAddress] = sum } - _, node, client := CreateTestL2WithConfig(t, ctx, l2info, arbnode.ConfigDefaultL2Test(), true) + _, node, client := CreateTestL2WithConfig(t, ctx, l2info, nil, nil, true) defer node.StopAndWait() for accountAddress, sum := range expectedSums { diff --git a/system_tests/ipc_test.go b/system_tests/ipc_test.go index 01ecf859d8..e25b4a21ea 100644 --- a/system_tests/ipc_test.go +++ b/system_tests/ipc_test.go @@ -24,7 +24,7 @@ func TestIpcRpc(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, l2node, _, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, nil, stackConf) + _, l2node, _, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, nil, nil, stackConf) defer requireClose(t, l1stack) defer l2node.StopAndWait() diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index 851bf38ce0..7fb6934e84 100644 --- a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -20,7 +20,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { defer cancel() conf := arbnode.ConfigDefaultL1Test() conf.BatchPoster.Enable = false - l2Info, arbNode, l2Client, l1Info, l1Backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil) + l2Info, arbNode, l2Client, l1Info, l1Backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil, nil) defer requireClose(t, l1stack) defer arbNode.StopAndWait() @@ -33,11 +33,12 @@ func TestMeaninglessBatchReorg(t *testing.T) { batchReceipt, err := EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) + execNode := getExecNode(t, arbNode) for i := 0; ; i++ { if i >= 500 { Fatal(t, "Failed to read batch from L1") } - msgNum, err := arbNode.Execution.ExecEngine.HeadMessageNumber() + msgNum, err := execNode.ExecEngine.HeadMessageNumber() Require(t, err) if msgNum == 1 { break diff --git a/system_tests/recreatestate_rpc_test.go b/system_tests/recreatestate_rpc_test.go index dbf68c8479..28e72b0653 100644 --- a/system_tests/recreatestate_rpc_test.go +++ b/system_tests/recreatestate_rpc_test.go @@ -17,13 +17,13 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util" ) -func prepareNodeWithHistory(t *testing.T, ctx context.Context, nodeConfig *arbnode.Config, txCount uint64) (node *arbnode.Node, executionNode *execution.ExecutionNode, l2client *ethclient.Client, cancel func()) { +func prepareNodeWithHistory(t *testing.T, ctx context.Context, execConfig *gethexec.Config, txCount uint64) (node *arbnode.Node, executionNode *gethexec.ExecutionNode, l2client *ethclient.Client, cancel func()) { t.Helper() - l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, nil) + l2info, node, l2client, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nil, execConfig, nil, nil) cancel = func() { defer requireClose(t, l1stack) 
defer node.StopAndWait() @@ -40,7 +40,9 @@ func prepareNodeWithHistory(t *testing.T, ctx context.Context, nodeConfig *arbno _, err := EnsureTxSucceeded(ctx, l2client, tx) Require(t, err) } - return node, node.Execution, l2client, cancel + exec := getExecNode(t, node) + + return node, exec, l2client, cancel } func fillHeaderCache(t *testing.T, bc *core.BlockChain, from, to uint64) { @@ -90,7 +92,7 @@ func removeStatesFromDb(t *testing.T, bc *core.BlockChain, db ethdb.Database, fr func TestRecreateStateForRPCNoDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig := gethexec.ConfigDefaultTest() nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 @@ -126,7 +128,7 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() depthGasLimit := int64(256 * util.NormalizeL2GasForL1GasInitial(800_000, params.GWei)) - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig := gethexec.ConfigDefaultTest() nodeConfig.RPC.MaxRecreateStateDepth = depthGasLimit nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 @@ -161,7 +163,7 @@ func TestRecreateStateForRPCBigEnoughDepthLimit(t *testing.T) { func TestRecreateStateForRPCDepthLimitExceeded(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig := gethexec.ConfigDefaultTest() nodeConfig.RPC.MaxRecreateStateDepth = int64(200) nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 @@ -196,7 +198,7 @@ func TestRecreateStateForRPCMissingBlockParent(t *testing.T) { var headerCacheLimit uint64 = 512 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig := gethexec.ConfigDefaultTest() nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 @@ -242,7 +244,7 @@ func TestRecreateStateForRPCBeyondGenesis(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig := gethexec.ConfigDefaultTest() nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 @@ -278,7 +280,7 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { var blockCacheLimit uint64 = 256 ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeConfig := arbnode.ConfigDefaultL1Test() + nodeConfig := gethexec.ConfigDefaultTest() nodeConfig.RPC.MaxRecreateStateDepth = arbitrum.InfiniteMaxRecreateStateDepth nodeConfig.Sequencer.MaxBlockSpeed = 0 nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 @@ -318,26 +320,30 @@ func TestRecreateStateForRPCBlockNotFoundWhileRecreating(t *testing.T) { } } -func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig *execution.CachingConfig, txCount int) { +func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig *gethexec.CachingConfig, txCount int) { maxRecreateStateDepth := int64(30 * 1000 * 
1000) ctx, cancel := context.WithCancel(context.Background()) defer cancel() ctx1, cancel1 := context.WithCancel(ctx) - nodeConfig := arbnode.ConfigDefaultL2Test() - nodeConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth - nodeConfig.Sequencer.MaxBlockSpeed = 0 - nodeConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 - nodeConfig.Caching = *cacheConfig + execConfig := gethexec.ConfigDefaultTest() + execConfig.RPC.MaxRecreateStateDepth = maxRecreateStateDepth + execConfig.Sequencer.MaxBlockSpeed = 0 + execConfig.Sequencer.MaxTxDataSize = 150 // 1 test tx ~= 110 + execConfig.Caching = *cacheConfig - skipBlocks := nodeConfig.Caching.MaxNumberOfBlocksToSkipStateSaving - skipGas := nodeConfig.Caching.MaxAmountOfGasToSkipStateSaving + skipBlocks := execConfig.Caching.MaxNumberOfBlocksToSkipStateSaving + skipGas := execConfig.Caching.MaxAmountOfGasToSkipStateSaving feedErrChan := make(chan error, 10) - AddDefaultValNode(t, ctx1, nodeConfig, true) - l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &nodeConfig.Caching) + l2info, stack, chainDb, arbDb, blockchain := createL2BlockChain(t, nil, t.TempDir(), params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + Require(t, execConfig.Validate()) + execConfigFetcher := func() *gethexec.Config { return execConfig } + execNode, err := gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) - node, err := arbnode.CreateNode(ctx1, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, nil, nil, nil, nil, feedErrChan) + node, err := arbnode.CreateNode(ctx1, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, nil, nil, nil, nil, feedErrChan) Require(t, err) err = node.TxStreamer.AddFakeInitMessage() Require(t, err) @@ -373,14 +379,17 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig cancel1() t.Log("stopped first node") - AddDefaultValNode(t, ctx, nodeConfig, true) - l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), &nodeConfig.Caching) - node, err = arbnode.CreateNode(ctx, stack, chainDb, arbDb, NewFetcherFromConfig(nodeConfig), blockchain, nil, node.DeployInfo, nil, nil, nil, feedErrChan) + l2info, stack, chainDb, arbDb, blockchain = createL2BlockChain(t, l2info, dataDir, params.ArbitrumDevTestChainConfig(), &execConfig.Caching) + + execNode, err = gethexec.CreateExecutionNode(ctx1, stack, chainDb, blockchain, nil, execConfigFetcher) + Require(t, err) + + node, err = arbnode.CreateNode(ctx, stack, execNode, arbDb, NewFetcherFromConfig(arbnode.ConfigDefaultL2Test()), blockchain.Config(), nil, node.DeployInfo, nil, nil, nil, feedErrChan) Require(t, err) Require(t, node.Start(ctx)) client = ClientForStack(t, stack) defer node.StopAndWait() - bc := node.Execution.Backend.ArbInterface().BlockChain() + bc := execNode.Backend.ArbInterface().BlockChain() gas := skipGas blocks := skipBlocks for i := genesis + 1; i <= genesis+uint64(txCount); i++ { @@ -423,7 +432,7 @@ func testSkippingSavingStateAndRecreatingAfterRestart(t *testing.T, cacheConfig } func TestSkippingSavingStateAndRecreatingAfterRestart(t *testing.T) { - cacheConfig := execution.DefaultCachingConfig + cacheConfig := gethexec.DefaultCachingConfig cacheConfig.Archive = true // disable caching of states in BlockChain.stateCache cacheConfig.TrieCleanCache = 0 diff --git 
a/system_tests/reorg_resequencing_test.go b/system_tests/reorg_resequencing_test.go index f132d46487..bdd4c4af45 100644 --- a/system_tests/reorg_resequencing_test.go +++ b/system_tests/reorg_resequencing_test.go @@ -22,6 +22,8 @@ func TestReorgResequencing(t *testing.T) { l2info, node, client := CreateTestL2(t, ctx) defer node.StopAndWait() + execNode := getExecNode(t, node) + startMsgCount, err := node.TxStreamer.GetMessageCount() Require(t, err) @@ -51,7 +53,7 @@ func TestReorgResequencing(t *testing.T) { err = node.TxStreamer.ReorgTo(startMsgCount) Require(t, err) - _, err = node.Execution.ExecEngine.HeadMessageNumberSync(t) + _, err = execNode.ExecEngine.HeadMessageNumberSync(t) Require(t, err) verifyBalances("after empty reorg") @@ -76,7 +78,7 @@ func TestReorgResequencing(t *testing.T) { }}) Require(t, err) - _, err = node.Execution.ExecEngine.HeadMessageNumberSync(t) + _, err = execNode.ExecEngine.HeadMessageNumberSync(t) Require(t, err) accountsWithBalance = append(accountsWithBalance, "User4") @@ -85,7 +87,7 @@ func TestReorgResequencing(t *testing.T) { err = node.TxStreamer.ReorgTo(startMsgCount) Require(t, err) - _, err = node.Execution.ExecEngine.HeadMessageNumberSync(t) + _, err = execNode.ExecEngine.HeadMessageNumberSync(t) Require(t, err) verifyBalances("after second empty reorg") diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index b1dd32d1dc..0fc6d24ed0 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -21,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/retryables" "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -802,7 +803,7 @@ func elevateL2Basefee(t *testing.T, ctx context.Context, l2client *ethclient.Cli _, err = precompilesgen.NewArbosTest(common.HexToAddress("0x69"), l2client) Require(t, err, "failed to deploy ArbosTest") - burnAmount := arbnode.ConfigDefaultL1Test().RPC.RPCGasCap + burnAmount := gethexec.ConfigDefaultTest().RPC.RPCGasCap burnTarget := uint64(5 * l2pricing.InitialSpeedLimitPerSecondV6 * l2pricing.InitialBacklogTolerance) for i := uint64(0); i < (burnTarget+burnAmount)/burnAmount; i++ { burnArbGas := arbostestabi.Methods["burnArbGas"] diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index b1f50c9436..881e3b2658 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -17,9 +17,10 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/redisutil" ) @@ -62,7 +63,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { createStartNode := func(nodeNum int) { nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] - _, node, _ := CreateTestL2WithConfig(t, ctx, l2Info, nodeConfig, false) + _, node, _ := CreateTestL2WithConfig(t, ctx, l2Info, nodeConfig, nil, false) nodes[nodeNum] = node } @@ -144,7 +145,8 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { } nodeForwardTarget := func(nodeNum int) int { - fwTarget := 
nodes[nodeNum].Execution.TxPublisher.(*execution.TxPreChecker).TransactionPublisher.(*execution.Sequencer).ForwardTarget() + execNode := getExecNode(t, nodes[nodeNum]) + fwTarget := execNode.TxPublisher.(*gethexec.TxPreChecker).TransactionPublisher.(*gethexec.Sequencer).ForwardTarget() if fwTarget == "" { return -1 } @@ -278,7 +280,7 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) nodeConfig.SeqCoordinator.MyUrl = nodeNames[0] - l2Info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, params.ArbitrumDevTestChainConfig(), nil) + l2Info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, params.ArbitrumDevTestChainConfig(), nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() @@ -307,7 +309,7 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} } - clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2Info.ArbInitData, nodeConfig, nil) + clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2Info.ArbInitData, nodeConfig, nil, nil) defer nodeB.StopAndWait() tx := l2Info.PrepareTx("Owner", "User2", l2Info.TransferGas, big.NewInt(1e12), nil) diff --git a/system_tests/seq_nonce_test.go b/system_tests/seq_nonce_test.go index 968f141364..d70f47a146 100644 --- a/system_tests/seq_nonce_test.go +++ b/system_tests/seq_nonce_test.go @@ -15,7 +15,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -24,9 +24,9 @@ func TestSequencerParallelNonces(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := arbnode.ConfigDefaultL2Test() + config := gethexec.ConfigDefaultTest() config.Sequencer.NonceFailureCacheExpiry = time.Minute - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, config, false) + l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) defer node.StopAndWait() l2info.GenerateAccount("Destination") @@ -62,8 +62,8 @@ func TestSequencerNonceTooHigh(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := arbnode.ConfigDefaultL2Test() - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, config, false) + config := gethexec.ConfigDefaultTest() + l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) defer node.StopAndWait() l2info.GetInfoWithPrivKey("Owner").Nonce++ @@ -88,10 +88,10 @@ func TestSequencerNonceTooHighQueueFull(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := arbnode.ConfigDefaultL2Test() + config := gethexec.ConfigDefaultTest() config.Sequencer.NonceFailureCacheSize = 5 config.Sequencer.NonceFailureCacheExpiry = time.Minute - l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, config, false) + l2info, node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, false) defer node.StopAndWait() count := 15 diff --git a/system_tests/seq_pause_test.go b/system_tests/seq_pause_test.go index fd057c0181..3817768517 100644 --- a/system_tests/seq_pause_test.go +++ 
b/system_tests/seq_pause_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/arbnode/execution" + "github.com/offchainlabs/nitro/execution/gethexec" ) func TestSequencerPause(t *testing.T) { @@ -21,11 +21,12 @@ func TestSequencerPause(t *testing.T) { const numUsers = 100 - prechecker, ok := nodeA.Execution.TxPublisher.(*execution.TxPreChecker) + execA := getExecNode(t, nodeA) + prechecker, ok := execA.TxPublisher.(*gethexec.TxPreChecker) if !ok { t.Error("prechecker not found on node") } - sequencer, ok := prechecker.TransactionPublisher.(*execution.Sequencer) + sequencer, ok := prechecker.TransactionPublisher.(*gethexec.Sequencer) if !ok { t.Error("sequencer not found on node") } diff --git a/system_tests/seq_reject_test.go b/system_tests/seq_reject_test.go index 19c06c4bc3..34a14c660e 100644 --- a/system_tests/seq_reject_test.go +++ b/system_tests/seq_reject_test.go @@ -31,14 +31,14 @@ func TestSequencerRejection(t *testing.T) { seqNodeConfig := arbnode.ConfigDefaultL2Test() seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() feedErrChan := make(chan error, 10) - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, true) + l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) defer nodeA.StopAndWait() clientNodeConfig := arbnode.ConfigDefaultL2Test() port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) - _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, false) + _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) defer nodeB.StopAndWait() auth := l2info1.GetDefaultTransactOpts("Owner", ctx) diff --git a/system_tests/seq_whitelist_test.go b/system_tests/seq_whitelist_test.go index 2d671dcdd6..36e309a5d7 100644 --- a/system_tests/seq_whitelist_test.go +++ b/system_tests/seq_whitelist_test.go @@ -9,16 +9,16 @@ import ( "testing" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/execution/gethexec" ) func TestSequencerWhitelist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := arbnode.ConfigDefaultL2Test() + config := gethexec.ConfigDefaultTest() config.Sequencer.SenderWhitelist = GetTestAddressForAccountName(t, "Owner").String() + "," + GetTestAddressForAccountName(t, "User").String() - l2info, l2node, client := CreateTestL2WithConfig(t, ctx, nil, config, true) + l2info, l2node, client := CreateTestL2WithConfig(t, ctx, nil, nil, config, true) defer l2node.StopAndWait() l2info.GenerateAccount("User") diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 23c0e44c02..d0d05d569e 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -13,6 +13,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/broadcastclient" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/relay" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" @@ -44,13 +45,13 @@ func TestSequencerFeed(t *testing.T) { seqNodeConfig := arbnode.ConfigDefaultL2Test() seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, true) + l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) defer 
nodeA.StopAndWait() clientNodeConfig := arbnode.ConfigDefaultL2Test() port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) - _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, false) + _, nodeB, client2 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) defer nodeB.StopAndWait() l2info1.GenerateAccount("User2")
@@ -79,7 +80,7 @@ func TestRelayedSequencerFeed(t *testing.T) { seqNodeConfig := arbnode.ConfigDefaultL2Test() seqNodeConfig.Feed.Output = *newBroadcasterConfigTest() - l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, true) + l2info1, nodeA, client1 := CreateTestL2WithConfig(t, ctx, nil, seqNodeConfig, nil, true) defer nodeA.StopAndWait() bigChainId, err := client1.ChainID(ctx)
@@ -101,7 +102,7 @@ func TestRelayedSequencerFeed(t *testing.T) { clientNodeConfig := arbnode.ConfigDefaultL2Test() port = currentRelay.GetListenerAddr().(*net.TCPAddr).Port clientNodeConfig.Feed.Input = *newBroadcastClientConfigTest(port) - _, nodeC, client3 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, false) + _, nodeC, client3 := CreateTestL2WithConfig(t, ctx, nil, clientNodeConfig, nil, false) defer nodeC.StopAndWait() StartWatchChanErr(t, ctx, feedErrChan, nodeC)
@@ -135,7 +136,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigA.BatchPoster.Enable = true nodeConfigA.Feed.Output.Enable = false - l2infoA, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, chainConfig, nil) + l2infoA, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfigA, nil, chainConfig, nil) defer requireClose(t, l1stack, "unable to close l1stack") defer nodeA.StopAndWait()
@@ -147,7 +148,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigC.DataAvailability = nodeConfigA.DataAvailability nodeConfigC.DataAvailability.RPCAggregator.Enable = false nodeConfigC.Feed.Output = *newBroadcasterConfigTest() - l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigC, nil) + l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigC, gethexec.ConfigDefaultTest(), nil) defer nodeC.StopAndWait() port := nodeC.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port
@@ -158,7 +159,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigB.Feed.Input = *newBroadcastClientConfigTest(port) nodeConfigB.DataAvailability = nodeConfigA.DataAvailability nodeConfigB.DataAvailability.RPCAggregator.Enable = false - l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigB, nil) + l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigB, nil, nil) defer nodeB.StopAndWait() l2infoA.GenerateAccount("FraudUser")
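The next hunk replaces a single SendTransaction call with a bounded retry, presumably because node C's endpoint may not yet be ready when the fraud transaction is first submitted. The same pattern as a reusable helper (hypothetical, not part of this change):

    package main

    import (
    	"context"
    	"time"
    )

    // retryUntil mirrors the loop added below: call fn up to attempts times,
    // sleeping interval between tries, and stop early if ctx is canceled.
    func retryUntil(ctx context.Context, attempts int, interval time.Duration, fn func() error) error {
    	var err error
    	for i := 0; i < attempts; i++ {
    		if err = fn(); err == nil {
    			return nil
    		}
    		select {
    		case <-ctx.Done():
    			return ctx.Err()
    		case <-time.After(interval):
    		}
    	}
    	return err
    }

With it, the loop below would read: err := retryUntil(ctx, 10, 10*time.Millisecond, func() error { return l2clientC.SendTransaction(ctx, fraudTx) }).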
+ t.Fatal("error sending fraud transaction:", err) + } } - _, err = EnsureTxSucceeded(ctx, l2clientC, fraudTx) + _, err := EnsureTxSucceeded(ctx, l2clientC, fraudTx) if err != nil { t.Fatal("error ensuring fraud transaction succeeded:", err) } diff --git a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go index de615278c5..a456dc5fe9 100644 --- a/system_tests/seqinbox_test.go +++ b/system_tests/seqinbox_test.go @@ -6,16 +6,20 @@ package arbtest import ( "bytes" "context" + "errors" "fmt" "math/big" "math/rand" "testing" "time" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethclient/gethclient" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" @@ -39,6 +43,54 @@ type blockTestState struct { const seqInboxTestIters = 40 +func encodeAddBatch(seqABI *abi.ABI, seqNum *big.Int, message []byte, afterDelayedMsgRead *big.Int, gasRefunder common.Address) ([]byte, error) { + method, ok := seqABI.Methods["addSequencerL2BatchFromOrigin0"] + if !ok { + return nil, errors.New("failed to find add addSequencerL2BatchFromOrigin0 method") + } + inputData, err := method.Inputs.Pack( + seqNum, + message, + afterDelayedMsgRead, + gasRefunder, + new(big.Int).SetUint64(uint64(1)), + new(big.Int).SetUint64(uint64(1)), + ) + if err != nil { + return nil, err + } + fullData := append([]byte{}, method.ID...) + fullData = append(fullData, inputData...) + return fullData, nil +} +func diffAccessList(accessed, al types.AccessList) string { + m := make(map[common.Address]map[common.Hash]bool) + for i := 0; i < len(al); i++ { + if _, ok := m[al[i].Address]; !ok { + m[al[i].Address] = make(map[common.Hash]bool) + } + for _, slot := range al[i].StorageKeys { + m[al[i].Address][slot] = true + } + } + + diff := "" + for i := 0; i < len(accessed); i++ { + addr := accessed[i].Address + if _, ok := m[addr]; !ok { + diff += fmt.Sprintf("contract address: %q wasn't accessed\n", addr) + continue + } + for j := 0; j < len(accessed[i].StorageKeys); j++ { + slot := accessed[i].StorageKeys[j] + if _, ok := m[addr][slot]; !ok { + diff += fmt.Sprintf("storage slot: %v for contract: %v wasn't accessed\n", slot, addr) + } + } + } + return diff +} + func deployGasRefunder(ctx context.Context, t *testing.T, info *BlockchainTestInfo, client *ethclient.Client) common.Address { t.Helper() abi, err := bridgegen.GasRefunderMetaData.GetAbi() @@ -91,13 +143,20 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if validator { conf.BlockValidator.Enable = true } - l2Info, arbNode, _, l1Info, l1backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, false, conf, nil, nil) - l2Backend := arbNode.Execution.Backend + l2Info, arbNode, _, l1Info, l1backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, false, conf, nil, nil, nil) + execNode := getExecNode(t, arbNode) + l2Backend := execNode.Backend defer requireClose(t, l1stack) defer arbNode.StopAndWait() l1BlockChain := l1backend.BlockChain() + rpcC, err := l1stack.Attach() + if err != nil { + t.Fatalf("Error connecting to l1 node: %v", err) + } + gethClient := gethclient.New(rpcC) + seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client) Require(t, err) seqOpts := 
@@ -91,13 +143,20 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if validator { conf.BlockValidator.Enable = true } - l2Info, arbNode, _, l1Info, l1backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, false, conf, nil, nil) - l2Backend := arbNode.Execution.Backend + l2Info, arbNode, _, l1Info, l1backend, l1Client, l1stack := createTestNodeOnL1WithConfig(t, ctx, false, conf, nil, nil, nil) + execNode := getExecNode(t, arbNode) + l2Backend := execNode.Backend defer requireClose(t, l1stack) defer arbNode.StopAndWait() l1BlockChain := l1backend.BlockChain() + rpcC, err := l1stack.Attach() + if err != nil { + t.Fatalf("Error connecting to l1 node: %v", err) + } + gethClient := gethclient.New(rpcC) + seqInbox, err := bridgegen.NewSequencerInbox(l1Info.GetAddress("SequencerInbox"), l1Client) Require(t, err) seqOpts := l1Info.GetDefaultTransactOpts("Sequencer", ctx)
@@ -142,6 +201,11 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { } SendWaitTestTransactions(t, ctx, l1Client, faucetTxs) + seqABI, err := bridgegen.SequencerInboxMetaData.GetAbi() + if err != nil { + t.Fatalf("Error getting sequencer inbox abi: %v", err) + } + for i := 1; i < seqInboxTestIters; i++ { if i%10 == 0 { reorgTo := rand.Int() % len(blockStates)
@@ -266,6 +330,31 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if err != nil { t.Fatalf("BalanceAt(%v) unexpected error: %v", seqOpts.From, err) } + + data, err := encodeAddBatch(seqABI, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr) + if err != nil { + t.Fatalf("Error encoding batch data: %v", err) + } + si := l1Info.GetAddress("SequencerInbox") + wantAL, _, _, err := gethClient.CreateAccessList(ctx, ethereum.CallMsg{ + From: seqOpts.From, + To: &si, + Data: data, + }) + if err != nil { + t.Fatalf("Error creating access list: %v", err) + } + accessed := arbnode.AccessList(&arbnode.AccessListOpts{ + SequencerInboxAddr: l1Info.GetAddress("SequencerInbox"), + BridgeAddr: l1Info.GetAddress("Bridge"), + DataPosterAddr: seqOpts.From, + GasRefunderAddr: gasRefunderAddr, + SequencerInboxAccs: len(blockStates), + AfterDelayedMessagesRead: 1, + }) + if diff := diffAccessList(accessed, *wantAL); diff != "" { + t.Errorf("Access list mismatch:\n%s\n", diff) + } if i%5 == 0 { tx, err = seqInbox.AddSequencerL2Batch(&seqOpts, big.NewInt(int64(len(blockStates))), batchData, big.NewInt(1), gasRefunderAddr, big.NewInt(0), big.NewInt(0)) } else {
@@ -325,7 +414,7 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) { if validator && i%15 == 0 { for i := 0; ; i++ { - expectedPos, err := arbNode.Execution.ExecEngine.BlockNumberToMessageIndex(expectedBlockNumber) + expectedPos, err := execNode.ExecEngine.BlockNumberToMessageIndex(expectedBlockNumber) Require(t, err) lastValidated := arbNode.BlockValidator.Validated(t) if lastValidated == expectedPos+1 {
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 36b112d03a..6267abe0c5 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go
@@ -28,6 +28,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker"
@@ -65,22 +66,26 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) types.NewArbitrumSigner(types.NewLondonSigner(l2chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2), transferGas, ) - _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, l2chainConfig, nil, l2info) + _, l2nodeA, l2clientA, _, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfigImpl(t, ctx, true, nil, nil, l2chainConfig, nil, l2info) defer requireClose(t, l1stack) defer l2nodeA.StopAndWait() + execNodeA := getExecNode(t, l2nodeA) if faultyStaker { l2info.GenerateGenesisAccount("FaultyAddr", common.Big1) } config := arbnode.ConfigDefaultL1Test() - config.Sequencer.Enable = false + execConfig := gethexec.ConfigDefaultTest() + execConfig.Sequencer.Enable = false + config.Sequencer = false config.DelayedSequencer.Enable = false config.BatchPoster.Enable = false - _,
l2nodeB := Create2ndNodeWithConfig(t, ctx, l2nodeA, l1stack, l1info, &l2info.ArbInitData, config, nil) + _, l2nodeB := Create2ndNodeWithConfig(t, ctx, l2nodeA, l1stack, l1info, &l2info.ArbInitData, config, execConfig, nil) defer l2nodeB.StopAndWait() + execNodeB := getExecNode(t, l2nodeB) - nodeAGenesis := l2nodeA.Execution.Backend.APIBackend().CurrentHeader().Hash() - nodeBGenesis := l2nodeB.Execution.Backend.APIBackend().CurrentHeader().Hash() + nodeAGenesis := execNodeA.Backend.APIBackend().CurrentHeader().Hash() + nodeBGenesis := execNodeB.Backend.APIBackend().CurrentHeader().Hash() if faultyStaker { if nodeAGenesis == nodeBGenesis { Fatal(t, "node A L2 genesis hash", nodeAGenesis, "== node B L2 genesis hash", nodeBGenesis) @@ -150,7 +155,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) l2nodeA.InboxReader, l2nodeA.InboxTracker, l2nodeA.TxStreamer, - l2nodeA.Execution.Recorder, + execNodeA, l2nodeA.ArbDB, nil, StaticFetcherFrom(t, &blockValidatorConfig), @@ -190,7 +195,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) l2nodeB.InboxReader, l2nodeB.InboxTracker, l2nodeB.TxStreamer, - l2nodeB.Execution.Recorder, + execNodeB, l2nodeB.ArbDB, nil, StaticFetcherFrom(t, &blockValidatorConfig), diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index 72de2aa50a..6280a4a575 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -20,7 +20,7 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeStr) defer lifecycleManager.StopAndWaitUntil(time.Second) - l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, chainConfig, nil) + l2info, nodeA, l2clientA, l1info, _, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index 0cac9d6442..16c369df46 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -42,7 +42,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, dasModeStr) defer lifecycleManager.StopAndWaitUntil(time.Second) - l2info, nodeA, l2client, l1info, l1backend, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, chainConfig, nil) + l2info, nodeA, l2client, l1info, l1backend, l1client, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, l1NodeConfigA, nil, chainConfig, nil) defer requireClose(t, l1stack) authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index b6ebd02478..d9c302b33f 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -12,9 +12,9 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode" - "github.com/offchainlabs/nitro/arbnode/execution" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/rpcclient" diff --git 
a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index ab61f8a2ee..ff3b420a1c 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go
@@ -81,6 +81,7 @@ func AddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".poll-only", DefaultConfig.PollOnly, "do not attempt to subscribe to header events") f.Bool(prefix+".use-finality-data", DefaultConfig.UseFinalityData, "use l1 data about finalized/safe blocks") f.Duration(prefix+".poll-interval", DefaultConfig.PollInterval, "interval when polling endpoint") + f.Duration(prefix+".subscribe-err-interval", DefaultConfig.SubscribeErrInterval, "interval to wait before retrying header subscription after an error") f.Duration(prefix+".tx-timeout", DefaultConfig.TxTimeout, "timeout when waiting for a transaction") f.Duration(prefix+".old-header-timeout", DefaultConfig.OldHeaderTimeout, "warns if the latest l1 block is at least this old") }
diff --git a/util/signature/sign_verify.go b/util/signature/sign_verify.go index 2911912979..5ed852bfbc 100644 --- a/util/signature/sign_verify.go +++ b/util/signature/sign_verify.go
@@ -31,6 +31,12 @@ func SignVerifyConfigAddOptions(prefix string, f *flag.FlagSet) { } var DefaultSignVerifyConfig = SignVerifyConfig{ + ECDSA: DefultFeedVerifierConfig, + SymmetricFallback: false, + SymmetricSign: false, + Symmetric: EmptySimpleHmacConfig, +} +var TestSignVerifyConfig = SignVerifyConfig{ ECDSA: VerifierConfig{ AcceptSequencer: true, },
diff --git a/util/signature/sign_verify_test.go b/util/signature/sign_verify_test.go index 8ecb6e5ccc..916fc03a20 100644 --- a/util/signature/sign_verify_test.go +++ b/util/signature/sign_verify_test.go
@@ -17,7 +17,7 @@ func TestSignVerifyModes(t *testing.T) { signingAddr := crypto.PubkeyToAddress(privateKey.PublicKey) dataSigner := DataSignerFromPrivateKey(privateKey) - config := DefaultSignVerifyConfig + config := TestSignVerifyConfig config.SymmetricFallback = false config.SymmetricSign = false config.ECDSA.AcceptSequencer = false
@@ -25,14 +25,14 @@ func TestSignVerifyModes(t *testing.T) { signVerifyECDSA, err := NewSignVerify(&config, dataSigner, nil) Require(t, err) - configSymmetric := DefaultSignVerifyConfig + configSymmetric := TestSignVerifyConfig configSymmetric.SymmetricFallback = true configSymmetric.SymmetricSign = true configSymmetric.ECDSA.AcceptSequencer = false signVerifySymmetric, err := NewSignVerify(&configSymmetric, nil, nil) Require(t, err) - configFallback := DefaultSignVerifyConfig + configFallback := TestSignVerifyConfig configFallback.SymmetricFallback = true configFallback.SymmetricSign = false configFallback.ECDSA.AllowedAddresses = []string{signingAddr.Hex()}
diff --git a/util/signature/verifier.go b/util/signature/verifier.go index 2bf5b854ed..c2f6529ec6 100644 --- a/util/signature/verifier.go +++ b/util/signature/verifier.go
@@ -37,7 +37,7 @@ var ErrMissingSignature = fmt.Errorf("%w: signature not found", ErrSignatureNotV var ErrSignerNotApproved = fmt.Errorf("%w: signer not approved", ErrSignatureNotVerified) func FeedVerifierConfigAddOptions(prefix string, f *flag.FlagSet) { - f.StringArray(prefix+".allowed-addresses", DefultFeedVerifierConfig.AllowedAddresses, "a list of allowed addresses") + f.StringSlice(prefix+".allowed-addresses", DefultFeedVerifierConfig.AllowedAddresses, "a comma separated list of allowed addresses") f.Bool(prefix+".accept-sequencer", DefultFeedVerifierConfig.AcceptSequencer, "accept verified message from sequencer") DangerousFeedVerifierConfigAddOptions(prefix+".dangerous", f) }
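One behavioral note on the verifier.go change: in spf13/pflag, StringSlice splits each flag value on commas, while StringArray stores each value verbatim, one entry per flag occurrence. After this change a flag value like --…allowed-addresses=0xA,0xB parses as two addresses instead of one. A quick standalone check (the demo flag set is illustrative, not from this diff):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	f := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	slice := f.StringSlice("slice", nil, "comma-separated values")
    	array := f.StringArray("array", nil, "verbatim values; repeat the flag to append")
    	_ = f.Parse([]string{"--slice=0xA,0xB", "--array=0xA,0xB"})
    	fmt.Println(len(*slice), *slice) // 2 [0xA 0xB]
    	fmt.Println(len(*array), *array) // 1 [0xA,0xB]
    }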