diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 8a69bf13ef..8144c9b7be 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"math"
 	"math/big"
+	"strings"
 	"sync/atomic"
 	"time"
 
@@ -167,19 +168,20 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
 var DefaultBatchPosterConfig = BatchPosterConfig{
 	Enable:                             false,
 	DisableDasFallbackStoreDataOnChain: false,
-	MaxSize:                            100000,
-	PollInterval:                       time.Second * 10,
-	ErrorDelay:                         time.Second * 10,
-	MaxDelay:                           time.Hour,
-	WaitForMaxDelay:                    false,
-	CompressionLevel:                   brotli.BestCompression,
-	DASRetentionPeriod:                 time.Hour * 24 * 15,
-	GasRefunderAddress:                 "",
-	ExtraBatchGas:                      50_000,
-	DataPoster:                         dataposter.DefaultDataPosterConfig,
-	ParentChainWallet:                  DefaultBatchPosterL1WalletConfig,
-	L1BlockBound:                       "",
-	L1BlockBoundBypass:                 time.Hour,
+	// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
+	MaxSize:                            100000,
+	PollInterval:                       time.Second * 10,
+	ErrorDelay:                         time.Second * 10,
+	MaxDelay:                           time.Hour,
+	WaitForMaxDelay:                    false,
+	CompressionLevel:                   brotli.BestCompression,
+	DASRetentionPeriod:                 time.Hour * 24 * 15,
+	GasRefunderAddress:                 "",
+	ExtraBatchGas:                      50_000,
+	DataPoster:                         dataposter.DefaultDataPosterConfig,
+	ParentChainWallet:                  DefaultBatchPosterL1WalletConfig,
+	L1BlockBound:                       "",
+	L1BlockBoundBypass:                 time.Hour,
 }
 
 var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{
@@ -261,12 +263,13 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe
 // contain reverted batch_poster transaction.
 // It returns true if it finds batch posting needs to halt, which is true if a batch reverts
 // unless the data poster is configured with noop storage which can tolerate reverts.
-func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, error) {
-	if from > to {
-		return false, fmt.Errorf("wrong range, from: %d is more to: %d", from, to)
+// From must be a pointer to the starting block, which is updated after each block is checked for reverts
+func (b *BatchPoster) checkReverts(ctx context.Context, from *int64, to int64) (bool, error) {
+	if *from > to {
+		return false, fmt.Errorf("wrong range, from: %d > to: %d", *from, to)
 	}
-	for idx := from; idx <= to; idx++ {
-		number := big.NewInt(idx)
+	for ; *from <= to; *from++ {
+		number := big.NewInt(*from)
 		block, err := b.l1Reader.Client().BlockByNumber(ctx, number)
 		if err != nil {
 			return false, fmt.Errorf("getting block: %v by number: %w", number, err)
@@ -276,7 +279,7 @@ func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, e
 		if err != nil {
 			return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err)
 		}
-		if bytes.Equal(from.Bytes(), b.dataPoster.Sender().Bytes()) {
+		if from == b.dataPoster.Sender() {
 			r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash())
 			if err != nil {
 				return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err)
 			}
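Passing `from` as a pointer is what lets the caller resume cleanly after a transient RPC error: every successfully checked block advances the caller's cursor, so only the failing block is retried. A standalone sketch of the pattern (the names here are illustrative, not code from this diff):

// Sketch: scanRange advances *from past every block that checks out, so when
// check fails the caller can call again with the same pointer and resume at
// the failed block instead of rescanning the whole range.
package main

import (
	"errors"
	"fmt"
)

func scanRange(from *int64, to int64, check func(int64) error) error {
	for ; *from <= to; *from++ {
		if err := check(*from); err != nil {
			return err // *from still points at the block that failed
		}
	}
	return nil
}

func main() {
	next := int64(10)
	flaky := func(n int64) error {
		if n == 12 {
			return errors.New("not found") // transient RPC miss
		}
		return nil
	}
	_ = scanRange(&next, 15, flaky)
	fmt.Println(next) // 12: blocks 10-11 won't be rechecked on retry
}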
@@ -302,7 +305,7 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) {
 	headerCh, unsubscribe := b.l1Reader.Subscribe(false)
 	defer unsubscribe()
 
-	last := int64(0) // number of last seen block
+	nextToCheck := int64(0) // the first unchecked block
 	for {
 		// Poll until:
 		// - L1 headers reader channel is closed, or
@@ -311,31 +314,37 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) {
 		select {
 		case h, ok := <-headerCh:
 			if !ok {
-				log.Info("L1 headers channel has been closed")
+				log.Info("L1 headers channel checking for batch poster reverts has been closed")
 				return
 			}
 			// If this is the first block header, set last seen as number-1.
 			// We may see same block number again if there is L1 reorg, in that
 			// case we check the block again.
-			if last == 0 || last == h.Number.Int64() {
-				last = h.Number.Int64() - 1
+			if nextToCheck == 0 || nextToCheck == h.Number.Int64() {
+				nextToCheck = h.Number.Int64()
 			}
-			if h.Number.Int64()-last > 100 {
-				log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", last, "current", h.Number)
-				last = h.Number.Int64()
+			if h.Number.Int64()-nextToCheck > 100 {
+				log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", nextToCheck, "current", h.Number)
+				nextToCheck = h.Number.Int64()
 				continue
 			}
-			reverted, err := b.checkReverts(ctx, last+1, h.Number.Int64())
+			reverted, err := b.checkReverts(ctx, &nextToCheck, h.Number.Int64())
 			if err != nil {
-				log.Error("Checking batch reverts", "error", err)
+				logLevel := log.Error
+				if strings.Contains(err.Error(), "not found") {
+					// Just parent chain node inconsistency
+					// One node sent us a block, but another didn't have it
+					// We'll try to check this block again next loop
+					logLevel = log.Debug
+				}
+				logLevel("Error checking batch reverts", "err", err)
 				continue
 			}
 			if reverted {
 				b.batchReverted.Store(true)
 				return
 			}
-			last = h.Number.Int64()
 		case <-ctx.Done():
 			return
 		}
 	}
diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go
index dff2602cac..b1e6555b26 100644
--- a/arbnode/dataposter/data_poster.go
+++ b/arbnode/dataposter/data_poster.go
@@ -111,16 +111,16 @@ func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, a
 	switch {
 	case initConfig.UseNoOpStorage:
 		queue = &noop.Storage{}
-	case initConfig.UseLevelDB:
-		queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
-	case redisClient == nil:
-		queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
-	default:
+	case redisClient != nil:
 		var err error
 		queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF)
 		if err != nil {
 			return nil, err
 		}
+	case initConfig.UseLevelDB:
+		queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
+	default:
+		queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} })
 	}
 	return &DataPoster{
 		headerReader: headerReader,
@@ -665,7 +665,7 @@ var DefaultDataPosterConfig = DataPosterConfig{
 	MaxTipCapGwei:          5,
 	NonceRbfSoftConfs:      1,
 	AllocateMempoolBalance: true,
-	UseLevelDB:             false,
+	UseLevelDB:             true,
 	UseNoOpStorage:         false,
 	LegacyStorageEncoding:  true,
 }
diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go
index b59bf7bf62..70637c48e0 100644
--- a/arbnode/dataposter/storage/storage.go
+++ b/arbnode/dataposter/storage/storage.go
@@ -15,6 +15,7 @@ var (
 	ErrStorageRace = errors.New("storage race error")
 
 	BlockValidatorPrefix string = "v" // the prefix for all block validator keys
+	StakerPrefix         string = "S" // the prefix for all staker keys
 	BatchPosterPrefix    string = "b" // the prefix for all batch poster keys
 	// TODO(anodar): move everything else from schema.go file to here once
 	// execution split is complete.
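The data poster switch above changes queue-selection precedence: a configured Redis client now wins over LevelDB, and with UseLevelDB defaulting to true, nodes without Redis get a persistent queue instead of the in-memory fallback. A standalone restatement of the new ordering (not code from the diff, implementations reduced to labels):

// Sketch of the queue-selection precedence after this change.
package main

import "fmt"

func chooseQueueStorage(useNoOpStorage, useLevelDB, haveRedisClient bool) string {
	switch {
	case useNoOpStorage:
		return "noop" // explicit opt-out: tolerates reverts, persists nothing
	case haveRedisClient:
		return "redis" // Redis now takes precedence over LevelDB
	case useLevelDB:
		return "leveldb" // on by default after this change
	default:
		return "slice" // in-memory, lost on restart
	}
}

func main() {
	fmt.Println(chooseQueueStorage(false, true, true))  // redis
	fmt.Println(chooseQueueStorage(false, true, false)) // leveldb
}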
diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go
index f45a85ac49..aa6d43785e 100644
--- a/arbnode/delayed_sequencer.go
+++ b/arbnode/delayed_sequencer.go
@@ -52,14 +52,14 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) {
 var DefaultDelayedSequencerConfig = DelayedSequencerConfig{
 	Enable:              false,
 	FinalizeDistance:    20,
-	RequireFullFinality: true,
+	RequireFullFinality: false,
 	UseMergeFinality:    true,
 }
 
 var TestDelayedSequencerConfig = DelayedSequencerConfig{
 	Enable:              true,
 	FinalizeDistance:    20,
-	RequireFullFinality: true,
+	RequireFullFinality: false,
 	UseMergeFinality:    true,
 }
 
diff --git a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go
index 402958399d..927ce7ac08 100644
--- a/arbnode/execution/sequencer.go
+++ b/arbnode/execution/sequencer.go
@@ -110,6 +110,7 @@ var DefaultSequencerConfig = SequencerConfig{
 	NonceCacheSize:          1024,
 	Dangerous:               DefaultDangerousSequencerConfig,
 	// 95% of the default batch poster limit, leaving 5KB for headers and such
+	// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
 	MaxTxDataSize:           95000,
 	NonceFailureCacheSize:   1024,
 	NonceFailureCacheExpiry: time.Second,
diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go
index dc069a6d18..968a1f266b 100644
--- a/arbnode/execution/tx_pre_checker.go
+++ b/arbnode/execution/tx_pre_checker.go
@@ -145,11 +145,6 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
 	if config.Strictness < TxPreCheckerStrictnessLikelyCompatible {
 		return nil
 	}
-	balance := statedb.GetBalance(sender)
-	cost := tx.Cost()
-	if arbmath.BigLessThan(balance, cost) {
-		return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost)
-	}
 	if options != nil {
 		if err := options.Check(extraInfo.L1BlockNumber, header.Time, statedb); err != nil {
 			conditionalTxRejectedByTxPreCheckerCurrentStateCounter.Inc(1)
@@ -185,6 +180,11 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty
 			conditionalTxAcceptedByTxPreCheckerOldStateCounter.Inc(1)
 		}
 	}
+	balance := statedb.GetBalance(sender)
+	cost := tx.Cost()
+	if arbmath.BigLessThan(balance, cost) {
+		return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost)
+	}
 	if config.Strictness >= TxPreCheckerStrictnessFullValidation && tx.Nonce() > stateNonce {
 		return MakeNonceError(sender, tx.Nonce(), stateNonce)
 	}
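The tx_pre_checker hunks move the balance check below the conditional-transaction options checks, changing rejection precedence: a conditional tx with unmet options now fails (and increments the rejection counters) before funds are considered. Roughly the new order, with the inlined logic condensed into hypothetical closures (not code from the diff):

// Sketch of the reordered prechecks in PreCheckTx.
package main

import (
	"errors"
	"fmt"
)

func preCheck(checkOptions, checkBalance, checkNonce func() error) error {
	if err := checkOptions(); err != nil { // conditional-tx options first
		return err
	}
	if err := checkBalance(); err != nil { // balance check moved after options
		return err
	}
	return checkNonce() // full-validation nonce check stays last
}

func main() {
	err := preCheck(
		func() error { return errors.New("conditional options unmet") },
		func() error { return errors.New("insufficient funds") },
		func() error { return nil },
	)
	fmt.Println(err) // the options error wins even when funds are also short
}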
diff --git a/arbnode/node.go b/arbnode/node.go
index 7fde929771..a40c84fa94 100644
--- a/arbnode/node.go
+++ b/arbnode/node.go
@@ -235,20 +235,12 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com
 	}
 }
 
-func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) {
-	arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client)
-	l1Reader, err := headerreader.New(ctx, l1client, readerConfig, arbSys, true)
-	if err != nil {
-		return nil, err
-	}
-	l1Reader.Start(ctx)
-	defer l1Reader.StopAndWait()
-
+func DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config) (*chaininfo.RollupAddresses, error) {
 	if config.WasmModuleRoot == (common.Hash{}) {
 		return nil, errors.New("no machine specified")
 	}
-	rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, l1Reader, deployAuth)
+	rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth)
 	if err != nil {
 		return nil, fmt.Errorf("error deploying rollup creator: %w", err)
 	}
@@ -267,7 +259,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b
 	if err != nil {
 		return nil, fmt.Errorf("error submitting create rollup tx: %w", err)
 	}
-	receipt, err := l1Reader.WaitForTxApproval(ctx, tx)
+	receipt, err := parentChainReader.WaitForTxApproval(ctx, tx)
 	if err != nil {
 		return nil, fmt.Errorf("error executing create rollup tx: %w", err)
 	}
@@ -547,7 +539,7 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error {
 	return nil
 }
 
-func ValidatorDataposter(
+func StakerDataposter(
 	db ethdb.Database, l1Reader *headerreader.HeaderReader, transactOpts *bind.TransactOpts,
 	cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor,
 ) (*dataposter.DataPoster, error) {
@@ -812,8 +804,8 @@ func createNodeImpl(
 	var messagePruner *MessagePruner
 
 	if config.Staker.Enable {
-		dp, err := ValidatorDataposter(
-			rawdb.NewTable(arbDb, storage.BlockValidatorPrefix),
+		dp, err := StakerDataposter(
+			rawdb.NewTable(arbDb, storage.StakerPrefix),
 			l1Reader,
 			txOptsValidator,
 			configFetcher,
diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json
index f5fa56102c..e66774d426 100644
--- a/cmd/chaininfo/arbitrum_chain_info.json
+++ b/cmd/chaininfo/arbitrum_chain_info.json
@@ -2,6 +2,7 @@
     {
         "chain-name": "arb1",
         "parent-chain-id": 1,
+        "parent-chain-is-arbitrum": false,
        "sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc",
        "feed-url": "wss://arb1.arbitrum.io/feed",
        "has-genesis-state": true,
@@ -51,6 +52,7 @@
     {
         "chain-name": "nova",
         "parent-chain-id": 1,
+        "parent-chain-is-arbitrum": false,
        "sequencer-url": "https://nova.arbitrum.io/rpc",
        "feed-url": "wss://nova.arbitrum.io/feed",
        "das-index-url": "https://nova.arbitrum.io/das-servers",
@@ -100,6 +102,7 @@
     {
         "chain-name": "goerli-rollup",
         "parent-chain-id": 5,
+        "parent-chain-is-arbitrum": false,
        "sequencer-url": "https://goerli-rollup.arbitrum.io/rpc",
        "feed-url": "wss://goerli-rollup.arbitrum.io/feed",
        "chain-config":
@@ -215,9 +218,10 @@
             }
         }
     },
-    { 
+    {
        "chain-id": 421614,
        "parent-chain-id": 11155111,
+       "parent-chain-is-arbitrum": false,
        "chain-name": "sepolia-rollup",
        "sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc",
        "feed-url": "wss://sepolia-rollup.arbitrum.io/feed",
diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go
index c9ffca9830..f75779b4aa 100644
--- a/cmd/chaininfo/chain_info.go
+++ b/cmd/chaininfo/chain_info.go
@@ -18,8 +18,9 @@ import (
 var DefaultChainInfo []byte
 
 type ChainInfo struct {
-	ChainName     string `json:"chain-name"`
-	ParentChainId uint64 `json:"parent-chain-id"`
+	ChainName             string `json:"chain-name"`
+	ParentChainId         uint64 `json:"parent-chain-id"`
+	ParentChainIsArbitrum *bool  `json:"parent-chain-is-arbitrum"`
 	// This is the forwarding target to submit transactions to, called the sequencer URL for clarity
 	SequencerUrl string `json:"sequencer-url"`
 	FeedUrl      string `json:"feed-url"`
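ParentChainIsArbitrum is a `*bool` rather than a plain `bool` so that an absent JSON field is distinguishable from an explicit false; applyChainParameters (below) warns and falls back to a heuristic when it is nil. A standalone illustration of that distinction (not code from the diff):

// Sketch: with a plain bool, a missing field and an explicit false are
// identical after unmarshalling; the pointer preserves "not set".
package main

import (
	"encoding/json"
	"fmt"
)

type chainInfo struct {
	ParentChainIsArbitrum *bool `json:"parent-chain-is-arbitrum"`
}

func main() {
	var absent, explicit chainInfo
	_ = json.Unmarshal([]byte(`{}`), &absent)
	_ = json.Unmarshal([]byte(`{"parent-chain-is-arbitrum": false}`), &explicit)
	fmt.Println(absent.ParentChainIsArbitrum == nil) // true: field missing, fall back and warn
	fmt.Println(*explicit.ParentChainIsArbitrum)     // false: explicitly set
}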
diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go
index 357fda14e6..17725a7a4c 100644
--- a/cmd/deploy/deploy.go
+++ b/cmd/deploy/deploy.go
@@ -127,13 +127,19 @@ func main() {
 		panic(fmt.Errorf("failed to deserialize chain config: %w", err))
 	}
 
+	l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig })
+	if err != nil {
+		panic(fmt.Errorf("failed to create header reader: %w", err))
+	}
+	l1Reader.Start(ctx)
+	defer l1Reader.StopAndWait()
+
 	deployedAddresses, err := arbnode.DeployOnL1(
 		ctx,
-		l1client,
+		l1Reader,
 		l1TransactionOpts,
 		sequencerAddress,
 		*authorizevalidators,
-		func() *headerreader.Config { return &headerReaderConfig },
 		arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress),
 	)
 	if err != nil {
@@ -148,12 +154,14 @@ func main() {
 	if err := os.WriteFile(*outfile, deployData, 0600); err != nil {
 		panic(err)
 	}
+	parentChainIsArbitrum := l1Reader.IsParentChainArbitrum()
 	chainsInfo := []chaininfo.ChainInfo{
 		{
-			ChainName:       *l2ChainName,
-			ParentChainId:   l1ChainId.Uint64(),
-			ChainConfig:     &chainConfig,
-			RollupAddresses: deployedAddresses,
+			ChainName:             *l2ChainName,
+			ParentChainId:         l1ChainId.Uint64(),
+			ParentChainIsArbitrum: &parentChainIsArbitrum,
+			ChainConfig:           &chainConfig,
+			RollupAddresses:       deployedAddresses,
 		},
 	}
 	chainsInfoJson, err := json.Marshal(chainsInfo)
diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index e404733b1e..f2cb3a37d2 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -360,7 +360,6 @@ func mainImpl() int {
 	l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys, true)
 	if err != nil {
 		log.Crit("failed to get L1 headerreader", "error", err)
-
 	}
 
 	// Just create validator smart wallet if needed then exit
@@ -771,6 +770,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
 	if err != nil {
 		return false, err
 	}
+	var parentChainIsArbitrum bool
+	if chainInfo.ParentChainIsArbitrum != nil {
+		parentChainIsArbitrum = *chainInfo.ParentChainIsArbitrum
+	} else {
+		log.Warn("Chain information parentChainIsArbitrum field missing, in the future this will be required", "chainId", chainId, "parentChainId", chainInfo.ParentChainId)
+		_, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", combinedL2ChainInfoFiles, "")
+		if err == nil {
+			parentChainIsArbitrum = true
+		}
+	}
 	chainDefaults := map[string]interface{}{
 		"persistent.chain": chainInfo.ChainName,
 		"chain.id":         chainInfo.ChainConfig.ChainID.Uint64(),
@@ -790,6 +799,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
 	if !chainInfo.HasGenesisState {
 		chainDefaults["init.empty"] = true
 	}
+	if parentChainIsArbitrum {
+		l2MaxTxSize := execution.DefaultSequencerConfig.MaxTxDataSize
+		bufferSpace := 5000
+		if l2MaxTxSize < bufferSpace*2 {
+			return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace)
+		}
+		safeBatchSize := l2MaxTxSize - bufferSpace
+		chainDefaults["node.batch-poster.max-size"] = safeBatchSize
+		chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - bufferSpace
+	}
 	err = k.Load(confmap.Provider(chainDefaults, "."), nil)
 	if err != nil {
 		return false, err
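When the parent chain is itself an Arbitrum chain, the child chain's batches travel inside parent-chain sequencer transactions, so each layer keeps 5000 bytes of headroom below the layer above. A worked example of the sizing rule with the defaults in this change (standalone, not code from the diff):

// With the parent's sequencer accepting 95000 bytes of tx data, the L3's
// batch size and sequencer tx-data limit are derived as below.
package main

import "fmt"

func main() {
	l2MaxTxSize := 95000 // execution.DefaultSequencerConfig.MaxTxDataSize
	bufferSpace := 5000
	safeBatchSize := l2MaxTxSize - bufferSpace

	fmt.Println(safeBatchSize)               // 90000 -> node.batch-poster.max-size
	fmt.Println(safeBatchSize - bufferSpace) // 85000 -> node.sequencer.max-tx-data-size
}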
diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go
index 09175332bf..5285e96ea9 100644
--- a/staker/eoa_validator_wallet.go
+++ b/staker/eoa_validator_wallet.go
@@ -15,11 +15,9 @@ import (
 	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/solgen/go/challengegen"
 	"github.com/offchainlabs/nitro/solgen/go/rollupgen"
-	"github.com/offchainlabs/nitro/util/stopwaiter"
 )
 
 type EoaValidatorWallet struct {
-	stopwaiter.StopWaiter
 	auth          *bind.TransactOpts
 	client        arbutil.L1Interface
 	rollupAddress common.Address
@@ -129,11 +127,9 @@ func (w *EoaValidatorWallet) AuthIfEoa() *bind.TransactOpts {
 
 func (w *EoaValidatorWallet) Start(ctx context.Context) {
 	w.dataPoster.Start(ctx)
-	w.StopWaiter.Start(ctx, w)
 }
 
 func (b *EoaValidatorWallet) StopAndWait() {
-	b.StopWaiter.StopAndWait()
 	b.dataPoster.StopAndWait()
 }
diff --git a/staker/staker.go b/staker/staker.go
index 9b7e6c238e..1b6538b161 100644
--- a/staker/staker.go
+++ b/staker/staker.go
@@ -372,11 +372,15 @@ func (s *Staker) getLatestStakedState(ctx context.Context, staker common.Address
 
 func (s *Staker) StopAndWait() {
 	s.StopWaiter.StopAndWait()
-	s.wallet.StopAndWait()
+	if s.Strategy() != WatchtowerStrategy {
+		s.wallet.StopAndWait()
+	}
 }
 
 func (s *Staker) Start(ctxIn context.Context) {
-	s.wallet.Start(ctxIn)
+	if s.Strategy() != WatchtowerStrategy {
+		s.wallet.Start(ctxIn)
+	}
 	s.StopWaiter.Start(ctxIn, s)
 	backoff := time.Second
 	s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) {
@@ -548,11 +552,11 @@ func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error {
 }
 
 func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) {
-	err := s.confirmDataPosterIsReady(ctx)
-	if err != nil {
-		return nil, err
-	}
 	if s.config.strategy != WatchtowerStrategy {
+		err := s.confirmDataPosterIsReady(ctx)
+		if err != nil {
+			return nil, err
+		}
 		whitelisted, err := s.IsWhitelisted(ctx)
 		if err != nil {
 			return nil, fmt.Errorf("error checking if whitelisted: %w", err)
diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go
index 7add3e258d..7131fe6074 100644
--- a/staker/stateless_block_validator.go
+++ b/staker/stateless_block_validator.go
@@ -7,6 +7,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"regexp"
 	"sync"
 	"testing"
 
@@ -455,8 +456,9 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error {
 		}
 		v.pendingWasmModuleRoot = latest
 	} else {
+		valid, _ := regexp.MatchString("(0x)?[0-9a-fA-F]{64}", v.config.PendingUpgradeModuleRoot)
 		v.pendingWasmModuleRoot = common.HexToHash(v.config.PendingUpgradeModuleRoot)
-		if (v.pendingWasmModuleRoot == common.Hash{}) {
+		if (!valid || v.pendingWasmModuleRoot == common.Hash{}) {
 			return errors.New("pending-upgrade-module-root config value illegal")
 		}
 	}
diff --git a/staker/validator_wallet.go b/staker/validator_wallet.go
index 133a808eac..fb0f5ed956 100644
--- a/staker/validator_wallet.go
+++ b/staker/validator_wallet.go
@@ -24,7 +24,6 @@ import (
 	"github.com/offchainlabs/nitro/solgen/go/rollupgen"
 	"github.com/offchainlabs/nitro/util/arbmath"
 	"github.com/offchainlabs/nitro/util/headerreader"
-	"github.com/offchainlabs/nitro/util/stopwaiter"
 )
 
 var validatorABI abi.ABI
@@ -66,7 +65,6 @@ type ValidatorWalletInterface interface {
 }
 
 type ContractValidatorWallet struct {
-	stopwaiter.StopWaiter
 	con             *rollupgen.ValidatorWallet
 	address         atomic.Pointer[common.Address]
 	onWalletCreated func(common.Address)
@@ -413,11 +411,11 @@ func (v *ContractValidatorWallet) AuthIfEoa() *bind.TransactOpts {
 }
 
 func (w *ContractValidatorWallet) Start(ctx context.Context) {
-	w.StopWaiter.Start(ctx, w)
+	w.dataPoster.Start(ctx)
}
 
 func (b *ContractValidatorWallet) StopAndWait() {
-	b.StopWaiter.StopAndWait()
+	b.dataPoster.StopAndWait()
 }
 
 func (b *ContractValidatorWallet) DataPoster() *dataposter.DataPoster {
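The new regexp guard in stateless_block_validator.go rejects values that HexToHash would otherwise silently coerce into some hash. Note the pattern is unanchored, so it only requires a 64-hex-digit run to appear somewhere in the value; the existing zero-hash check still rejects all-zero roots. A standalone check of what passes (not code from the diff):

// Quick demonstration of the validation logic added above.
package main

import (
	"fmt"
	"regexp"

	"github.com/ethereum/go-ethereum/common"
)

func validModuleRoot(s string) bool {
	valid, _ := regexp.MatchString("(0x)?[0-9a-fA-F]{64}", s)
	return valid && common.HexToHash(s) != (common.Hash{})
}

func main() {
	fmt.Println(validModuleRoot("0x8b104a2e80ac6165dc58b9048de12f301d70b02a0ab51396c22b4b4b802a16a4")) // true
	fmt.Println(validModuleRoot("latest"))     // false: the "latest" case is handled separately
	fmt.Println(validModuleRoot("0xdeadbeef")) // false: previously coerced by HexToHash, now rejected
}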
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index 81cb18ab30..b92fbf7578 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -475,13 +475,18 @@ func DeployOnTestL1(
 	Require(t, err)
 	serializedChainConfig, err := json.Marshal(chainConfig)
 	Require(t, err)
+
+	l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig })
+	Require(t, err)
+	l1Reader.Start(ctx)
+	defer l1Reader.StopAndWait()
+
 	addresses, err := arbnode.DeployOnL1(
 		ctx,
-		l1client,
+		l1Reader,
 		&l1TransactionOpts,
 		l1info.GetAddress("Sequencer"),
 		0,
-		func() *headerreader.Config { return &headerreader.TestConfig },
 		arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}),
 	)
 	Require(t, err)
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index 82eede9f60..96ea1ee2e7 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -130,7 +130,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 	valConfig := staker.TestL1ValidatorConfig
 
-	dpA, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
+	dpA, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
 	if err != nil {
 		t.Fatalf("Error creating validator dataposter: %v", err)
 	}
@@ -178,7 +178,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 	}
 	Require(t, err)
 
-	dpB, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
+	dpB, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
 	if err != nil {
 		t.Fatalf("Error creating validator dataposter: %v", err)
 	}
@@ -217,7 +217,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		err = valWalletB.Initialize(ctx)
 		Require(t, err)
 	}
-	dpC, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
+	dpC, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil)
 	if err != nil {
 		t.Fatalf("Error creating validator dataposter: %v", err)
 	}
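With the rename to StakerDataposter, the staker's data poster writes under its own table prefix ("S", storage.StakerPrefix) instead of sharing the block validator's "v" keyspace. rawdb.NewTable transparently prefixes every key, so the two components can no longer collide. A standalone illustration against an in-memory database (not code from the diff):

// Two tables over one database: keys are isolated by their prefixes.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	stakerDb := rawdb.NewTable(db, "S")    // storage.StakerPrefix
	validatorDb := rawdb.NewTable(db, "v") // storage.BlockValidatorPrefix

	_ = stakerDb.Put([]byte("queue.0"), []byte("staker tx"))
	_ = validatorDb.Put([]byte("queue.0"), []byte("validator state"))

	v, _ := stakerDb.Get([]byte("queue.0"))
	fmt.Printf("%s\n", v) // "staker tx": stored as "Squeue.0", distinct from "vqueue.0"
}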