diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go
index ed11ab87da..16c465b9bf 100644
--- a/broadcastclients/broadcastclients.go
+++ b/broadcastclients/broadcastclients.go
@@ -120,6 +120,18 @@ func (bcs *BroadcastClients) adjustCount(delta int32) {
 	}
 }
 
+// Clears out a ticker's channel and resets it to the given interval
+func clearAndResetTicker(timer *time.Ticker, interval time.Duration) {
+	timer.Stop()
+	// Clear out any previous ticks
+	// A ticker's channel only buffers one tick, so we don't need a loop here
+	select {
+	case <-timer.C:
+	default:
+	}
+	timer.Reset(interval)
+}
+
 func (bcs *BroadcastClients) Start(ctx context.Context) {
 	bcs.primaryRouter.StopWaiter.Start(ctx, bcs.primaryRouter)
 	bcs.secondaryRouter.StopWaiter.Start(ctx, bcs.secondaryRouter)
@@ -182,46 +194,45 @@ func (bcs *BroadcastClients) Start(ctx context.Context) {
 			return
 		// Primary feeds
 		case msg := <-bcs.primaryRouter.messageChan:
-			startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
-			primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME)
 			if err := msgHandler(msg, bcs.primaryRouter); err != nil {
 				log.Error("Error routing message from Primary Sequencer Feeds", "err", err)
 			}
+			clearAndResetTicker(startSecondaryFeedTimer, MAX_FEED_INACTIVE_TIME)
+			clearAndResetTicker(primaryFeedIsDownTimer, MAX_FEED_INACTIVE_TIME)
 		case cs := <-bcs.primaryRouter.confirmedSequenceNumberChan:
-			startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
-			primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME)
 			confSeqHandler(cs, bcs.primaryRouter)
+			clearAndResetTicker(startSecondaryFeedTimer, MAX_FEED_INACTIVE_TIME)
+			clearAndResetTicker(primaryFeedIsDownTimer, MAX_FEED_INACTIVE_TIME)
 		// Failed to get messages from primary feed for ~5 seconds, reset the timer responsible for stopping a secondary
 		case <-primaryFeedIsDownTimer.C:
-			stopSecondaryFeedTimer.Reset(PRIMARY_FEED_UPTIME)
+			clearAndResetTicker(stopSecondaryFeedTimer, PRIMARY_FEED_UPTIME)
 		default:
 			select {
 			case <-ctx.Done():
 				return
 			// Secondary Feeds
 			case msg := <-bcs.secondaryRouter.messageChan:
-				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
 				if err := msgHandler(msg, bcs.secondaryRouter); err != nil {
 					log.Error("Error routing message from Secondary Sequencer Feeds", "err", err)
 				}
+				clearAndResetTicker(startSecondaryFeedTimer, MAX_FEED_INACTIVE_TIME)
 			case cs := <-bcs.secondaryRouter.confirmedSequenceNumberChan:
-				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
 				confSeqHandler(cs, bcs.secondaryRouter)
-
+				clearAndResetTicker(startSecondaryFeedTimer, MAX_FEED_INACTIVE_TIME)
 			case msg := <-bcs.primaryRouter.messageChan:
-				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
-				primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME)
 				if err := msgHandler(msg, bcs.primaryRouter); err != nil {
 					log.Error("Error routing message from Primary Sequencer Feeds", "err", err)
 				}
+				clearAndResetTicker(startSecondaryFeedTimer, MAX_FEED_INACTIVE_TIME)
+				clearAndResetTicker(primaryFeedIsDownTimer, MAX_FEED_INACTIVE_TIME)
 			case cs := <-bcs.primaryRouter.confirmedSequenceNumberChan:
-				startSecondaryFeedTimer.Reset(MAX_FEED_INACTIVE_TIME)
-				primaryFeedIsDownTimer.Reset(MAX_FEED_INACTIVE_TIME)
 				confSeqHandler(cs, bcs.primaryRouter)
+				clearAndResetTicker(startSecondaryFeedTimer, MAX_FEED_INACTIVE_TIME)
+				clearAndResetTicker(primaryFeedIsDownTimer, MAX_FEED_INACTIVE_TIME)
 			case <-startSecondaryFeedTimer.C:
 				bcs.startSecondaryFeed(ctx)
 			case <-primaryFeedIsDownTimer.C:
-				stopSecondaryFeedTimer.Reset(PRIMARY_FEED_UPTIME)
+				clearAndResetTicker(stopSecondaryFeedTimer, PRIMARY_FEED_UPTIME)
 			}
 		}
 	}
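Note on the drain-and-reset pattern introduced above: time.Ticker.Reset changes the period but does not discard a tick already buffered in the ticker's channel, so a bare Reset can deliver one stale tick immediately after the feed recovers and trigger the secondary-feed logic prematurely. The following standalone sketch (illustrative only, not part of the patch) reproduces the helper and the behavior it guards against:

package main

import (
	"fmt"
	"time"
)

// Same idea as the patch's clearAndResetTicker: stop, drain at most one
// buffered tick, then reset to the new interval.
func clearAndResetTicker(t *time.Ticker, interval time.Duration) {
	t.Stop()
	select {
	case <-t.C: // a ticker's channel has capacity 1, so one receive suffices
	default:
	}
	t.Reset(interval)
}

func main() {
	t := time.NewTicker(10 * time.Millisecond)
	defer t.Stop()
	time.Sleep(30 * time.Millisecond) // let a tick sit in t.C
	clearAndResetTicker(t, time.Second)
	select {
	case <-t.C:
		fmt.Println("stale tick leaked through") // what a bare Reset would allow
	case <-time.After(100 * time.Millisecond):
		fmt.Println("no stale tick after drain-and-reset")
	}
}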
diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go
index c0d6b3d005..13e586ced2 100644
--- a/cmd/chaininfo/chain_info.go
+++ b/cmd/chaininfo/chain_info.go
@@ -6,6 +6,7 @@ package chaininfo
 import (
 	_ "embed"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math/big"
 	"os"
@@ -86,7 +87,10 @@ func ProcessChainInfo(chainId uint64, chainName string, l2ChainInfoFiles []strin
 	if chainId != 0 {
 		return nil, fmt.Errorf("unsupported chain ID %v", chainId)
 	}
-	return nil, fmt.Errorf("unsupported chain name %v", chainName)
+	if chainName != "" {
+		return nil, fmt.Errorf("unsupported chain name %v", chainName)
+	}
+	return nil, errors.New("must specify --chain.id or --chain.name to choose rollup")
 }
 
 func findChainInfo(chainId uint64, chainName string, chainsInfoBytes []byte) (*ChainInfo, error) {
@@ -95,6 +99,10 @@ func findChainInfo(chainId uint64, chainName string, chainsInfoBytes []byte) (*C
 	if err != nil {
 		return nil, err
 	}
+	if chainId == 0 && chainName == "" && len(chainsInfo) == 1 {
+		// If single chain info and no chain id/name given, default to single chain info
+		return &chainsInfo[0], nil
+	}
 	for _, chainInfo := range chainsInfo {
 		if (chainId == 0 || chainInfo.ChainConfig.ChainID.Uint64() == chainId) && (chainName == "" || chainInfo.ChainName == chainName) {
 			return &chainInfo, nil
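With the change above, a node whose chain-info source defines exactly one chain no longer needs --chain.id or --chain.name; the "must specify" error is only raised when no chain could be selected at all. A small sketch of the new selection rule in isolation (simplified stand-in types, not the chaininfo package API):

package main

import "fmt"

type chainInfo struct { // hypothetical stand-in for chaininfo.ChainInfo
	ChainID   uint64
	ChainName string
}

func pickChain(chains []chainInfo, chainId uint64, chainName string) (*chainInfo, error) {
	if chainId == 0 && chainName == "" && len(chains) == 1 {
		return &chains[0], nil // no selector given: default to the only entry
	}
	for i := range chains {
		if (chainId == 0 || chains[i].ChainID == chainId) &&
			(chainName == "" || chains[i].ChainName == chainName) {
			return &chains[i], nil
		}
	}
	return nil, fmt.Errorf("no matching chain")
}

func main() {
	single := []chainInfo{{ChainID: 42161, ChainName: "arb1"}}
	got, _ := pickChain(single, 0, "") // no id/name: falls back to the single entry
	fmt.Println(got.ChainName)         // arb1
}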
diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go
index da88060dd7..f23fbc90de 100644
--- a/cmd/nitro/nitro.go
+++ b/cmd/nitro/nitro.go
@@ -14,6 +14,7 @@ import (
 	"os/signal"
 	"path/filepath"
 	"reflect"
+	"regexp"
 	"strings"
 	"syscall"
 	"time"
@@ -50,6 +51,7 @@ import (
 	"github.com/offchainlabs/nitro/cmd/util/confighelpers"
 	"github.com/offchainlabs/nitro/execution/gethexec"
 	_ "github.com/offchainlabs/nitro/nodeInterface"
+	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
 	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
 	"github.com/offchainlabs/nitro/staker"
 	"github.com/offchainlabs/nitro/staker/validatorwallet"
@@ -491,6 +493,44 @@ func mainImpl() int {
 		log.Error("failed to create node", "err", err)
 		return 1
 	}
+
+	// Validate sequencer's MaxTxDataSize and batchPoster's MaxSize params.
+	config := liveNodeConfig.Get()
+	executionRevertedRegexp := regexp.MustCompile("(?i)execution reverted")
+	// SequencerInbox's maxDataSize is defaulted to 117964 which is 90% of Geth's 128KB tx size limit, leaving ~13KB for proving.
+	seqInboxMaxDataSize := 117964
+	if config.Node.ParentChainReader.Enable {
+		seqInbox, err := bridgegen.NewSequencerInbox(rollupAddrs.SequencerInbox, l1Client)
+		if err != nil {
+			log.Error("failed to create sequencer inbox for validating sequencer's MaxTxDataSize and batchposter's MaxSize", "err", err)
+			return 1
+		}
+		res, err := seqInbox.MaxDataSize(&bind.CallOpts{Context: ctx})
+		if err == nil {
+			seqInboxMaxDataSize = int(res.Int64())
+		} else if !executionRevertedRegexp.MatchString(err.Error()) {
+			log.Error("error fetching MaxDataSize from sequencer inbox", "err", err)
+			return 1
+		}
+	}
+	// If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox's maxDataSize if the data availability service is not enabled.
+	// The 10kB gap is because it's possible for the batch poster to exceed its MaxSize limit and produce batches of slightly larger size.
+	if config.Node.BatchPoster.Enable && !config.Node.DataAvailability.Enable {
+		if config.Node.BatchPoster.MaxSize > seqInboxMaxDataSize-10000 {
+			log.Error("batchPoster's MaxSize is too large")
+			return 1
+		}
+	}
+	// If sequencer is enabled, validate MaxTxDataSize to be at least 5kB below the batch poster's MaxSize to allow space for headers and such.
+	// And since the batch poster's MaxSize must be at least 10kB below the sequencer inbox's maxDataSize, this leads to another condition of at least 15kB below the sequencer inbox's maxDataSize.
+	if config.Execution.Sequencer.Enable {
+		if config.Execution.Sequencer.MaxTxDataSize > config.Node.BatchPoster.MaxSize-5000 ||
+			config.Execution.Sequencer.MaxTxDataSize > seqInboxMaxDataSize-15000 {
+			log.Error("sequencer's MaxTxDataSize too large")
+			return 1
+		}
+	}
+
 	liveNodeConfig.SetOnReloadHook(func(oldCfg *NodeConfig, newCfg *NodeConfig) error {
 		if err := genericconf.InitLog(newCfg.LogType, log.Lvl(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil {
 			return fmt.Errorf("failed to re-init logging: %w", err)
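The buffer constants above compose into simple bounds: with the defaulted inbox limit of 117964 bytes, the batch poster's MaxSize may be at most 107964, and the sequencer's MaxTxDataSize at most min(MaxSize - 5000, 102964). A compact sketch of the same checks with local names (not the actual nitro config structs):

package main

import "fmt"

func validateSizes(seqInboxMaxDataSize, batchPosterMaxSize, sequencerMaxTxDataSize int) error {
	// batch poster batches may slightly overshoot MaxSize, so keep a 10kB gap
	if batchPosterMaxSize > seqInboxMaxDataSize-10000 {
		return fmt.Errorf("batch poster MaxSize %d too large for inbox limit %d", batchPosterMaxSize, seqInboxMaxDataSize)
	}
	// sequencer txs need ~5kB of headroom below the batch poster limit,
	// which implies ~15kB below the inbox limit
	if sequencerMaxTxDataSize > batchPosterMaxSize-5000 ||
		sequencerMaxTxDataSize > seqInboxMaxDataSize-15000 {
		return fmt.Errorf("sequencer MaxTxDataSize %d too large", sequencerMaxTxDataSize)
	}
	return nil
}

func main() {
	// With the 117964 default: MaxSize may be at most 107964 and
	// MaxTxDataSize at most min(MaxSize-5000, 102964).
	fmt.Println(validateSizes(117964, 107964, 102964)) // <nil>
	fmt.Println(validateSizes(117964, 108000, 102964)) // error: MaxSize too large
}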
@@ -721,12 +761,9 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa
 	l2ChainName := k.String("chain.name")
 	l2ChainInfoIpfsUrl := k.String("chain.info-ipfs-url")
 	l2ChainInfoIpfsDownloadPath := k.String("chain.info-ipfs-download-path")
-	if l2ChainId == 0 && l2ChainName == "" {
-		return nil, nil, nil, errors.New("must specify --chain.id or --chain.name to choose rollup")
-	}
 	l2ChainInfoFiles := k.Strings("chain.info-files")
 	l2ChainInfoJson := k.String("chain.info-json")
-	chainFound, err := applyChainParameters(ctx, k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath)
+	err = applyChainParameters(ctx, k, uint64(l2ChainId), l2ChainName, l2ChainInfoFiles, l2ChainInfoJson, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -755,13 +792,6 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa
 	}
 
 	if nodeConfig.Persistent.Chain == "" {
-		if !chainFound {
-			// If persistent-chain not defined, user not creating custom chain
-			if l2ChainId != 0 {
-				return nil, nil, nil, fmt.Errorf("Unknown chain id: %d, L2ChainInfoFiles: %v. update chain id, modify --chain.info-files or provide --persistent.chain\n", l2ChainId, l2ChainInfoFiles)
-			}
-			return nil, nil, nil, fmt.Errorf("Unknown chain name: %s, L2ChainInfoFiles: %v. update chain name, modify --chain.info-files or provide --persistent.chain\n", l2ChainName, l2ChainInfoFiles)
-		}
 		return nil, nil, nil, errors.New("--persistent.chain not specified")
 	}
 
@@ -786,7 +816,7 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa
 	return &nodeConfig, &l1Wallet, &l2DevWallet, nil
 }
 
-func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) (bool, error) {
+func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, chainName string, l2ChainInfoFiles []string, l2ChainInfoJson string, l2ChainInfoIpfsUrl string, l2ChainInfoIpfsDownloadPath string) error {
 	combinedL2ChainInfoFiles := l2ChainInfoFiles
 	if l2ChainInfoIpfsUrl != "" {
 		l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, l2ChainInfoIpfsUrl, l2ChainInfoIpfsDownloadPath)
@@ -797,7 +827,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
 	}
 	chainInfo, err := chaininfo.ProcessChainInfo(chainId, chainName, combinedL2ChainInfoFiles, l2ChainInfoJson)
 	if err != nil {
-		return false, err
+		return err
 	}
 	var parentChainIsArbitrum bool
 	if chainInfo.ParentChainIsArbitrum != nil {
@@ -830,6 +860,8 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
 		chainDefaults["node.data-availability.enable"] = true
 		chainDefaults["node.data-availability.rest-aggregator.enable"] = true
 		chainDefaults["node.data-availability.rest-aggregator.online-url-list"] = chainInfo.DasIndexUrl
+	} else if chainInfo.ChainConfig.ArbitrumChainParams.DataAvailabilityCommittee {
+		chainDefaults["node.data-availability.enable"] = true
 	}
 	if !chainInfo.HasGenesisState {
 		chainDefaults["init.empty"] = true
@@ -838,7 +870,7 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
 	l2MaxTxSize := gethexec.DefaultSequencerConfig.MaxTxDataSize
 	bufferSpace := 5000
 	if l2MaxTxSize < bufferSpace*2 {
-		return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace)
+		return fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace)
 	}
 	safeBatchSize := l2MaxTxSize - bufferSpace
 	chainDefaults["node.batch-poster.max-size"] = safeBatchSize
@@ -849,9 +881,9 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c
 	}
 	err = k.Load(confmap.Provider(chainDefaults, "."), nil)
 	if err != nil {
-		return false, err
+		return err
 	}
-	return true, nil
+	return nil
 }
 
 type NodeConfigFetcher struct {
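For the batch-poster default set in applyChainParameters, the guard above requires the sequencer's default MaxTxDataSize to be at least twice bufferSpace before deriving node.batch-poster.max-size from it. A toy illustration of that derivation (the 100000 input is an arbitrary example, not the real gethexec default):

package main

import "fmt"

// safeBatchSize mirrors the guard in applyChainParameters: refuse inputs that
// leave no room for the buffer, otherwise subtract the buffer from the limit.
func safeBatchSize(maxTxDataSize, bufferSpace int) (int, error) {
	if maxTxDataSize < bufferSpace*2 {
		return 0, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", maxTxDataSize, bufferSpace)
	}
	return maxTxDataSize - bufferSpace, nil
}

func main() {
	size, err := safeBatchSize(100000, 5000)
	fmt.Println(size, err) // 95000 <nil>: would become the node.batch-poster.max-size default
}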