diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2f5765d72..a0f5251f9f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,6 +129,10 @@ jobs: version: latest skip-go-installation: true skip-pkg-cache: true + - name: Custom Lint + run: | + go run ./linter/koanf ./... + go run ./linter/pointercheck ./... - name: Set environment variables run: | diff --git a/.gitignore b/.gitignore index f0eb5c2ec3..8a628e29c4 100644 --- a/.gitignore +++ b/.gitignore @@ -19,5 +19,6 @@ solgen/go/ target/ yarn-error.log local/ -testdata system_tests/test-data/* +system_tests/testdata/* +arbos/testdata/* diff --git a/Dockerfile b/Dockerfile index 62455119fe..96bcb22952 100644 --- a/Dockerfile +++ b/Dockerfile @@ -203,6 +203,7 @@ WORKDIR /home/user COPY --from=node-builder /workspace/target/bin/nitro /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/relay /usr/local/bin/ COPY --from=node-builder /workspace/target/bin/nitro-val /usr/local/bin/ +COPY --from=node-builder /workspace/target/bin/seq-coordinator-manager /usr/local/bin/ COPY --from=machine-versions /workspace/machines /home/user/target/machines USER root RUN export DEBIAN_FRONTEND=noninteractive && \ diff --git a/Makefile b/Makefile index fdbde677f6..38ffb96200 100644 --- a/Makefile +++ b/Makefile @@ -88,7 +88,7 @@ push: lint test-go .make/fmt all: build build-replay-env test-gen-proofs @touch .make/all -build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val) +build: $(patsubst %,$(output_root)/bin/%, nitro deploy relay daserver datool seq-coordinator-invalidate nitro-val seq-coordinator-manager) @printf $(done) build-node-deps: $(go_source) build-prover-header build-prover-lib build-jit .make/solgen .make/cbrotli-lib @@ -185,6 +185,9 @@ $(output_root)/bin/seq-coordinator-invalidate: $(DEP_PREDICATE) build-node-deps $(output_root)/bin/nitro-val: $(DEP_PREDICATE) build-node-deps go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/nitro-val" +$(output_root)/bin/seq-coordinator-manager: $(DEP_PREDICATE) build-node-deps + go build $(GOLANG_PARAMS) -o $@ "$(CURDIR)/cmd/seq-coordinator-manager" + # recompile wasm, but don't change timestamp unless files differ $(replay_wasm): $(DEP_PREDICATE) $(go_source) .make/solgen mkdir -p `dirname $(replay_wasm)` @@ -304,6 +307,8 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make + go run ./linter/koanf ./... + go run ./linter/pointercheck ./... 
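The two go run invocations above (added both to the CI "Custom Lint" step and to the .make/lint rule) point at custom analyzers under ./linter whose implementation is not part of this diff. As a rough, hypothetical sketch of how such a go-run-able linter is typically structured on top of golang.org/x/tools/go/analysis (the package name and the check itself are illustrative, not the real koanf/pointercheck logic):

package main

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/singlechecker"
)

var analyzer = &analysis.Analyzer{
	Name: "examplelint",
	Doc:  "reports single-letter struct field names (illustrative check only)",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		for _, file := range pass.Files {
			ast.Inspect(file, func(n ast.Node) bool {
				field, ok := n.(*ast.Field)
				if !ok {
					return true
				}
				for _, name := range field.Names {
					if len(name.Name) == 1 {
						pass.Reportf(name.Pos(), "single-letter field name %q", name.Name)
					}
				}
				return true
			})
		}
		return nil, nil
	},
}

func main() {
	// singlechecker.Main parses the package patterns given on the command line
	// (e.g. ./...), runs the analyzer, and exits non-zero if anything is reported.
	singlechecker.Main(analyzer)
}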
golangci-lint run --fix yarn --cwd contracts solhint @touch $@ diff --git a/arbitrator/jit/src/syscall.rs b/arbitrator/jit/src/syscall.rs index 4cd0363b49..c81641a7f8 100644 --- a/arbitrator/jit/src/syscall.rs +++ b/arbitrator/jit/src/syscall.rs @@ -306,10 +306,10 @@ pub fn js_value_index(mut env: WasmEnvMut, sp: u32) { pub fn js_value_call(mut env: WasmEnvMut, sp: u32) -> MaybeEscape { let Some(resume) = env.data().exports.resume.clone() else { - return Escape::failure(format!("wasmer failed to bind {}", "resume".red())) + return Escape::failure(format!("wasmer failed to bind {}", "resume".red())); }; let Some(get_stack_pointer) = env.data().exports.get_stack_pointer.clone() else { - return Escape::failure(format!("wasmer failed to bind {}", "getsp".red())) + return Escape::failure(format!("wasmer failed to bind {}", "getsp".red())); }; let sp = GoStack::simple(sp, &env); let data = env.data_mut(); diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index d6e002daac..0849312f3d 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -651,7 +651,7 @@ pub struct MachineState<'a> { initial_hash: Bytes32, } -pub type PreimageResolver = Arc<dyn Fn(u64, Bytes32) -> Option<CBytes>>; +pub type PreimageResolver = Arc<dyn Fn(u64, Bytes32) -> Option<CBytes> + Send + Sync>; /// Wraps a preimage resolver to provide an easier API /// and cache the last preimage retrieved. diff --git a/arbitrator/prover/src/utils.rs b/arbitrator/prover/src/utils.rs index 6c11e9af05..e86ea96768 100644 --- a/arbitrator/prover/src/utils.rs +++ b/arbitrator/prover/src/utils.rs @@ -158,6 +158,13 @@ impl From<&[u8]> for CBytes { } } +// There's no thread safety concerns for CBytes. +// This type is basically a Box<[u8]> (which is Send + Sync) with libc as an allocator. +// Any data races between threads are prevented by Rust borrowing rules, +// and the data isn't thread-local so there's no concern moving it between threads.
+unsafe impl Send for CBytes {} +unsafe impl Sync for CBytes {} + #[derive(Serialize, Deserialize)] #[serde(remote = "Type")] enum RemoteType { diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index f870e3a6fa..89a36eba91 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -11,6 +11,7 @@ import ( "fmt" "math" "math/big" + "strings" "sync/atomic" "time" @@ -56,23 +57,27 @@ type batchPosterPosition struct { type BatchPoster struct { stopwaiter.StopWaiter - l1Reader *headerreader.HeaderReader - inbox *InboxTracker - streamer *TransactionStreamer - config BatchPosterConfigFetcher - seqInbox *bridgegen.SequencerInbox - bridge *bridgegen.Bridge - syncMonitor *SyncMonitor - seqInboxABI *abi.ABI - seqInboxAddr common.Address - building *buildingBatch - daWriter das.DataAvailabilityServiceWriter - dataPoster *dataposter.DataPoster - redisLock *redislock.Simple - firstAccErr time.Time // first time a continuous missing accumulator occurred - backlog uint64 // An estimate of the number of unposted batches - - batchReverted atomic.Bool // indicates whether data poster batch was reverted + l1Reader *headerreader.HeaderReader + inbox *InboxTracker + streamer *TransactionStreamer + config BatchPosterConfigFetcher + seqInbox *bridgegen.SequencerInbox + bridge *bridgegen.Bridge + syncMonitor *SyncMonitor + seqInboxABI *abi.ABI + seqInboxAddr common.Address + building *buildingBatch + daWriter das.DataAvailabilityServiceWriter + dataPoster *dataposter.DataPoster + redisLock *redislock.Simple + firstEphemeralError time.Time // first time a continuous error suspected to be ephemeral occurred + // An estimate of the number of batches we want to post but haven't yet. + // This doesn't include batches which we don't want to post yet due to the L1 bounds. + backlog uint64 + lastHitL1Bounds time.Time // The last time we wanted to post a message but hit the L1 bounds + + batchReverted atomic.Bool // indicates whether data poster batch was reverted + nextRevertCheckBlock int64 // the last parent block scanned for reverting batches } type l1BlockBound int @@ -88,23 +93,28 @@ const ( ) type BatchPosterConfig struct { - Enable bool `koanf:"enable"` - DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` - MaxBatchSize int `koanf:"max-size" reload:"hot"` - MaxBatchPostDelay time.Duration `koanf:"max-delay" reload:"hot"` - WaitForMaxBatchPostDelay bool `koanf:"wait-for-max-delay" reload:"hot"` - BatchPollDelay time.Duration `koanf:"poll-delay" reload:"hot"` - PostingErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` - CompressionLevel int `koanf:"compression-level" reload:"hot"` - DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` - GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` - DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` - RedisUrl string `koanf:"redis-url"` - RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` - ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` - L1Wallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` - L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` - L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` + Enable bool `koanf:"enable"` + DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` + // Max batch size. + MaxSize int `koanf:"max-size" reload:"hot"` + // Max batch post delay. 
+ MaxDelay time.Duration `koanf:"max-delay" reload:"hot"` + // Wait for max BatchPost delay. + WaitForMaxDelay bool `koanf:"wait-for-max-delay" reload:"hot"` + // Batch post polling interval. + PollInterval time.Duration `koanf:"poll-interval" reload:"hot"` + // Batch posting error delay. + ErrorDelay time.Duration `koanf:"error-delay" reload:"hot"` + CompressionLevel int `koanf:"compression-level" reload:"hot"` + DASRetentionPeriod time.Duration `koanf:"das-retention-period" reload:"hot"` + GasRefunderAddress string `koanf:"gas-refunder-address" reload:"hot"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` + L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` gasRefunder common.Address l1BlockBound l1BlockBound @@ -115,7 +125,7 @@ func (c *BatchPosterConfig) Validate() error { return fmt.Errorf("invalid gas refunder address \"%v\"", c.GasRefunderAddress) } c.gasRefunder = common.HexToAddress(c.GasRefunderAddress) - if c.MaxBatchSize <= 40 { + if c.MaxSize <= 40 { return errors.New("MaxBatchSize too small") } if c.L1BlockBound == "" { @@ -139,11 +149,11 @@ type BatchPosterConfigFetcher func() *BatchPosterConfig func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") - f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxBatchSize, "maximum batch size") - f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxBatchPostDelay, "maximum batch posting delay") - f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxBatchPostDelay, "wait for the max batch delay, even if the batch is full") - f.Duration(prefix+".poll-delay", DefaultBatchPosterConfig.BatchPollDelay, "how long to delay after successfully posting batch") - f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.PostingErrorDelay, "how long to delay after error posting batch") + f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") + f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxDelay, "wait for the max batch delay, even if the batch is full") + f.Duration(prefix+".poll-interval", DefaultBatchPosterConfig.PollInterval, "how long to wait after no batches are ready to be posted before checking again") + f.Duration(prefix+".error-delay", DefaultBatchPosterConfig.ErrorDelay, "how long to delay after error posting batch") f.Int(prefix+".compression-level", DefaultBatchPosterConfig.CompressionLevel, "batch compression level") f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the period which DASes are requested to retain the stored batches.") f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") @@ -153,50 +163,51 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { 
f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") redislock.AddConfigOptions(prefix+".redis-lock", f) dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) - genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.L1Wallet.Pathname) + genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultBatchPosterConfig.ParentChainWallet.Pathname) } var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, - MaxBatchSize: 100000, - BatchPollDelay: time.Second * 10, - PostingErrorDelay: time.Second * 10, - MaxBatchPostDelay: time.Hour, - WaitForMaxBatchPostDelay: false, - CompressionLevel: brotli.BestCompression, - DASRetentionPeriod: time.Hour * 24 * 15, - GasRefunderAddress: "", - ExtraBatchGas: 50_000, - DataPoster: dataposter.DefaultDataPosterConfig, - L1Wallet: DefaultBatchPosterL1WalletConfig, - L1BlockBound: "", - L1BlockBoundBypass: time.Hour, + // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go + MaxSize: 100000, + PollInterval: time.Second * 10, + ErrorDelay: time.Second * 10, + MaxDelay: time.Hour, + WaitForMaxDelay: false, + CompressionLevel: brotli.BestCompression, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 50_000, + DataPoster: dataposter.DefaultDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, } var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ Pathname: "batch-poster-wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, } var TestBatchPosterConfig = BatchPosterConfig{ - Enable: true, - MaxBatchSize: 100000, - BatchPollDelay: time.Millisecond * 10, - PostingErrorDelay: time.Millisecond * 10, - MaxBatchPostDelay: 0, - WaitForMaxBatchPostDelay: false, - CompressionLevel: 2, - DASRetentionPeriod: time.Hour * 24 * 15, - GasRefunderAddress: "", - ExtraBatchGas: 10_000, - DataPoster: dataposter.TestDataPosterConfig, - L1Wallet: DefaultBatchPosterL1WalletConfig, - L1BlockBound: "", - L1BlockBoundBypass: time.Hour, + Enable: true, + MaxSize: 100000, + PollInterval: time.Millisecond * 10, + ErrorDelay: time.Millisecond * 10, + MaxDelay: 0, + WaitForMaxDelay: false, + CompressionLevel: 2, + DASRetentionPeriod: time.Hour * 24 * 15, + GasRefunderAddress: "", + ExtraBatchGas: 10_000, + DataPoster: dataposter.TestDataPosterConfig, + ParentChainWallet: DefaultBatchPosterL1WalletConfig, + L1BlockBound: "", + L1BlockBoundBypass: time.Hour, } func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderReader, inbox *InboxTracker, streamer *TransactionStreamer, syncMonitor *SyncMonitor, config BatchPosterConfigFetcher, deployInfo *chaininfo.RollupAddresses, transactOpts *bind.TransactOpts, daWriter das.DataAvailabilityServiceWriter) (*BatchPoster, error) { @@ -251,12 +262,14 @@ func NewBatchPoster(dataPosterDB ethdb.Database, l1Reader *headerreader.HeaderRe // checkRevert checks blocks with number in range [from, to] whether they // contain reverted batch_poster transaction. 
-func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, error) { - if from > to { - return false, fmt.Errorf("wrong range, from: %d is more to: %d", from, to) - } - for idx := from; idx <= to; idx++ { - number := big.NewInt(idx) +// It returns true if it finds batch posting needs to halt, which is true if a batch reverts +// unless the data poster is configured with noop storage which can tolerate reverts. +func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) { + if b.nextRevertCheckBlock > to { + return false, fmt.Errorf("wrong range, from: %d > to: %d", b.nextRevertCheckBlock, to) + } + for ; b.nextRevertCheckBlock <= to; b.nextRevertCheckBlock++ { + number := big.NewInt(b.nextRevertCheckBlock) block, err := b.l1Reader.Client().BlockByNumber(ctx, number) if err != nil { return false, fmt.Errorf("getting block: %v by number: %w", number, err) @@ -266,14 +279,19 @@ func (b *BatchPoster) checkReverts(ctx context.Context, from, to int64) (bool, e if err != nil { return false, fmt.Errorf("getting sender of transaction tx: %v, %w", tx.Hash(), err) } - if bytes.Equal(from.Bytes(), b.dataPoster.Sender().Bytes()) { + if from == b.dataPoster.Sender() { r, err := b.l1Reader.Client().TransactionReceipt(ctx, tx.Hash()) if err != nil { return false, fmt.Errorf("getting a receipt for transaction: %v, %w", tx.Hash(), err) } if r.Status == types.ReceiptStatusFailed { - log.Error("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) - return true, nil + shouldHalt := !b.config().DataPoster.UseNoOpStorage + logLevel := log.Warn + if shouldHalt { + logLevel = log.Error + } + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) + return shouldHalt, nil } } } @@ -287,40 +305,46 @@ func (b *BatchPoster) pollForReverts(ctx context.Context) { headerCh, unsubscribe := b.l1Reader.Subscribe(false) defer unsubscribe() - last := int64(0) // number of last seen block for { // Poll until: // - L1 headers reader channel is closed, or // - polling is through context, or // - we see a transaction in the block from dataposter that was reverted. select { - case h, closed := <-headerCh: - if closed { - log.Info("L1 headers channel has been closed") + case h, ok := <-headerCh: + if !ok { + log.Info("L1 headers channel checking for batch poster reverts has been closed") return } + blockNum := h.Number.Int64() // If this is the first block header, set last seen as number-1. // We may see same block number again if there is L1 reorg, in that // case we check the block again. 
- if last == 0 || last == h.Number.Int64() { - last = h.Number.Int64() - 1 + if b.nextRevertCheckBlock == 0 || b.nextRevertCheckBlock > blockNum { + b.nextRevertCheckBlock = blockNum } - if h.Number.Int64()-last > 100 { - log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", last, "current", h.Number) - last = h.Number.Int64() + if blockNum-b.nextRevertCheckBlock > 100 { + log.Warn("Large gap between last seen and current block number, skipping check for reverts", "last", b.nextRevertCheckBlock, "current", blockNum) + b.nextRevertCheckBlock = blockNum continue } - reverted, err := b.checkReverts(ctx, last+1, h.Number.Int64()) + reverted, err := b.checkReverts(ctx, blockNum) if err != nil { - log.Error("Checking batch reverts", "error", err) + logLevel := log.Error + if strings.Contains(err.Error(), "not found") { + // Just parent chain node inconsistency + // One node sent us a block, but another didn't have it + // We'll try to check this block again next loop + logLevel = log.Debug + } + logLevel("Error checking batch reverts", "err", err) continue } if reverted { b.batchReverted.Store(true) return } - last = h.Number.Int64() case <-ctx.Done(): return } @@ -374,8 +398,8 @@ type buildingBatch struct { } func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { - compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxBatchSize*2)) - if config.MaxBatchSize <= 40 { + compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxSize*2)) + if config.MaxSize <= 40 { panic("MaxBatchSize too small") } compressionLevel := config.CompressionLevel @@ -401,7 +425,7 @@ func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog ui return &batchSegments{ compressedBuffer: compressedBuffer, compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: config.MaxBatchSize - 40, // TODO + sizeLimit: config.MaxSize - 40, // TODO recompressionLevel: recompressionLevel, rawSegments: make([][]byte, 0, 128), delayedMsg: firstDelayed, @@ -717,7 +741,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) firstMsgTime := time.Unix(int64(firstMsg.Message.Header.Timestamp), 0) config := b.config() - forcePostBatch := time.Since(firstMsgTime) >= config.MaxBatchPostDelay + forcePostBatch := time.Since(firstMsgTime) >= config.MaxDelay var l1BoundMaxBlockNumber uint64 = math.MaxUint64 var l1BoundMaxTimestamp uint64 = math.MaxUint64 @@ -772,7 +796,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - blockNumberWithPadding := arbmath.SaturatingUAdd(arbmath.BigToUintSaturating(latestHeader.Number), uint64(config.L1BlockBoundBypass/ethPosBlockTime)) + latestBlockNumber := arbutil.ParentHeaderToL1BlockNumber(latestHeader) + blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime)) timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second)) l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelayBlocks)) @@ -798,6 +823,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) l1BoundMaxTimestamp = math.MaxUint64 } if msg.Message.Header.BlockNumber > l1BoundMaxBlockNumber || msg.Message.Header.Timestamp > l1BoundMaxTimestamp { + b.lastHitL1Bounds = time.Now() log.Info( "not 
posting more messages because block number or timestamp exceed L1 bounds", "blockNumber", msg.Message.Header.BlockNumber, @@ -815,7 +841,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if !success { // this batch is full - if !config.WaitForMaxBatchPostDelay { + if !config.WaitForMaxDelay { forcePostBatch = true } b.building.haveUsefulMessage = true @@ -872,7 +898,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - if _, err := b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit, new(big.Int)); err != nil { + tx, err := b.dataPoster.PostTransaction(ctx, firstMsgTime, nonce, newMeta, b.seqInboxAddr, data, gasLimit, new(big.Int)) + if err != nil { return false, err } log.Info( @@ -884,16 +911,20 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "current delayed", b.building.segments.delayedMsg, "total segments", len(b.building.segments.rawSegments), ) + recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount unpostedMessages := msgCount - b.building.msgCount b.backlog = uint64(unpostedMessages) / uint64(postedMessages) if b.backlog > 10 { logLevel := log.Warn - if b.backlog > 30 { + if recentlyHitL1Bounds { + logLevel = log.Info + } else if b.backlog > 30 { logLevel = log.Error } logLevel( "a large batch posting backlog exists", + "recentlyHitL1Bounds", recentlyHitL1Bounds, "currentPosition", b.building.msgCount, "messageCount", msgCount, "lastPostedMessages", postedMessages, @@ -901,7 +932,22 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) "batchBacklogEstimate", b.backlog, ) } + if recentlyHitL1Bounds { + // This backlog isn't "real" in that we don't want to post any more messages. + // Setting the backlog to 0 here ensures that we don't lower compression as a result. + b.backlog = 0 + } b.building = nil + + // If we aren't queueing up transactions, wait for the receipt before moving on to the next batch. + if config.DataPoster.UseNoOpStorage { + receipt, err := b.l1Reader.WaitForTxApproval(ctx, tx) + if err != nil { + return false, fmt.Errorf("error waiting for tx receipt: %w", err) + } + log.Info("Got successful receipt from batch poster transaction", "txHash", tx.Hash(), "blockNumber", receipt.BlockNumber, "blockHash", receipt.BlockHash) + } + return true, nil } @@ -930,30 +976,32 @@ func (b *BatchPoster) Start(ctxIn context.Context) { } if !b.redisLock.AttemptLock(ctx) { b.building = nil - return b.config().BatchPollDelay + return b.config().PollInterval } posted, err := b.maybePostSequencerBatch(ctx) + ephemeralError := errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, storage.ErrStorageRace) + if !ephemeralError { + b.firstEphemeralError = time.Time{} + } if err != nil { b.building = nil logLevel := log.Error - if errors.Is(err, AccumulatorNotFoundErr) || errors.Is(err, storage.ErrStorageRace) { + if ephemeralError { // Likely the inbox tracker just isn't caught up. // Let's see if this error disappears naturally. 
- if b.firstAccErr == (time.Time{}) { - b.firstAccErr = time.Now() + if b.firstEphemeralError == (time.Time{}) { + b.firstEphemeralError = time.Now() logLevel = log.Debug - } else if time.Since(b.firstAccErr) < time.Minute { + } else if time.Since(b.firstEphemeralError) < time.Minute { logLevel = log.Debug } - } else { - b.firstAccErr = time.Time{} } logLevel("error posting batch", "err", err) - return b.config().PostingErrorDelay + return b.config().ErrorDelay } else if posted { return 0 } else { - return b.config().BatchPollDelay + return b.config().PollInterval } }) } diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 1e7b74834e..b1e6555b26 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -92,24 +92,35 @@ func parseReplacementTimes(val string) ([]time.Duration, error) { } func NewDataPoster(db ethdb.Database, headerReader *headerreader.HeaderReader, auth *bind.TransactOpts, redisClient redis.UniversalClient, redisLock AttemptLocker, config ConfigFetcher, metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error)) (*DataPoster, error) { - replacementTimes, err := parseReplacementTimes(config().ReplacementTimes) + initConfig := config() + replacementTimes, err := parseReplacementTimes(initConfig.ReplacementTimes) if err != nil { return nil, err } + if headerReader.IsParentChainArbitrum() && !initConfig.UseNoOpStorage { + initConfig.UseNoOpStorage = true + log.Info("Disabling data poster storage, as parent chain appears to be an Arbitrum chain without a mempool") + } + encF := func() storage.EncoderDecoderInterface { + if config().LegacyStorageEncoding { + return &storage.LegacyEncoderDecoder{} + } + return &storage.EncoderDecoder{} + } var queue QueueStorage switch { - case config().UseLevelDB: - queue = leveldb.New(db) - case config().UseNoOpStorage: + case initConfig.UseNoOpStorage: queue = &noop.Storage{} - case redisClient == nil: - queue = slice.NewStorage() - default: + case redisClient != nil: var err error - queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &config().RedisSigner) + queue, err = redisstorage.NewStorage(redisClient, "data-poster.queue", &initConfig.RedisSigner, encF) if err != nil { return nil, err } + case initConfig.UseLevelDB: + queue = leveldb.New(db, func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) + default: + queue = slice.NewStorage(func() storage.EncoderDecoderInterface { return &storage.EncoderDecoder{} }) } return &DataPoster{ headerReader: headerReader, @@ -157,51 +168,67 @@ func (p *DataPoster) canPostWithNonce(ctx context.Context, nextNonce uint64) err return nil } -// GetNextNonceAndMeta retrieves generates next nonce, validates that a -// transaction can be posted with that nonce, and fetches "Meta" either last -// queued iterm (if queue isn't empty) or retrieves with last block. -func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { - config := p.config() - p.mutex.Lock() - defer p.mutex.Unlock() +func (p *DataPoster) waitForL1Finality() bool { + return p.config().WaitForL1Finality && !p.headerReader.IsParentChainArbitrum() +} + +// Requires the caller hold the mutex. +// Returns the next nonce, its metadata if stored, a bool indicating if the metadata is present, and an error. +// Unlike GetNextNonceAndMeta, this does not call the metadataRetriever if the metadata is not stored in the queue. 
+func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []byte, bool, error) { // Ensure latest finalized block state is available. blockNum, err := p.client.BlockNumber(ctx) if err != nil { - return 0, nil, err + return 0, nil, false, err } lastQueueItem, err := p.queue.FetchLast(ctx) if err != nil { - return 0, nil, err + return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { nextNonce := lastQueueItem.Data.Nonce + 1 if err := p.canPostWithNonce(ctx, nextNonce); err != nil { - return 0, nil, err + return 0, nil, false, err } - return nextNonce, lastQueueItem.Meta, nil + return nextNonce, lastQueueItem.Meta, true, nil } if err := p.updateNonce(ctx); err != nil { - if !p.queue.IsPersistent() && config.WaitForL1Finality { - return 0, nil, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) + if !p.queue.IsPersistent() && p.waitForL1Finality() { + return 0, nil, false, fmt.Errorf("error getting latest finalized nonce (and queue is not persistent): %w", err) } // Fall back to using a recent block to get the nonce. This is safe because there's nothing in the queue. nonceQueryBlock := arbmath.UintToBig(arbmath.SaturatingUSub(blockNum, 1)) log.Warn("failed to update nonce with queue empty; falling back to using a recent block", "recentBlock", nonceQueryBlock, "err", err) nonce, err := p.client.NonceAt(ctx, p.sender, nonceQueryBlock) if err != nil { - return 0, nil, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) + return 0, nil, false, fmt.Errorf("failed to get nonce at block %v: %w", nonceQueryBlock, err) } p.lastBlock = nonceQueryBlock p.nonce = nonce } - meta, err := p.metadataRetriever(ctx, p.lastBlock) - return p.nonce, meta, err + return p.nonce, nil, false, nil +} + +// GetNextNonceAndMeta retrieves generates next nonce, validates that a +// transaction can be posted with that nonce, and fetches "Meta" either last +// queued iterm (if queue isn't empty) or retrieves with last block. 
+func (p *DataPoster) GetNextNonceAndMeta(ctx context.Context) (uint64, []byte, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + nonce, meta, hasMeta, err := p.getNextNonceAndMaybeMeta(ctx) + if err != nil { + return 0, nil, err + } + if !hasMeta { + meta, err = p.metadataRetriever(ctx, p.lastBlock) + } + return nonce, meta, err } const minRbfIncrease = arbmath.OneInBips * 11 / 10 -func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { config := p.config() latestHeader, err := p.headerReader.LastHeader(ctx) if err != nil { @@ -210,6 +237,11 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFee if latestHeader.BaseFee == nil { return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } + softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) + softConfNonce, err := p.client.NonceAt(ctx, p.sender, softConfBlock) + if err != nil { + return nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) + } newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2)) newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) @@ -252,14 +284,29 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFee newFeeCap = maxFeeCap } - balanceFeeCap := new(big.Int).Div(p.balance, new(big.Int).SetUint64(gasLimit)) + latestBalance := p.balance + balanceForTx := new(big.Int).Set(latestBalance) + if config.AllocateMempoolBalance && !config.UseNoOpStorage { + // We reserve half the balance for the first transaction, and then split the remaining balance for all after that. + // With noop storage, we don't try to replace-by-fee, so we don't need to worry about this. 
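// Worked example of the allocation described above (illustrative numbers, not part
// of this change): with AllocateMempoolBalance enabled, a balance of 1 ETH, and the
// default MaxMempoolTransactions of 10, the first replaceable transaction budgets
// its fees against balance/2 = 0.5 ETH, and every later queued transaction against
// (balance/2)/(10-1), about 0.055 ETH. The balanceFeeCap computed below is that
// per-transaction budget divided by the transaction's gas limit.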
+ balanceForTx.Div(balanceForTx, common.Big2) + if nonce != softConfNonce && config.MaxMempoolTransactions > 1 { + // balanceForTx /= config.MaxMempoolTransactions-1 + balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1)) + } + } + balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit) if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) { log.Error( "lack of L1 balance prevents posting transaction with desired fee cap", - "balance", p.balance, + "balance", latestBalance, + "maxTransactions", config.MaxMempoolTransactions, + "balanceForTransaction", balanceForTx, "gasLimit", gasLimit, "desiredFeeCap", newFeeCap, "balanceFeeCap", balanceFeeCap, + "nonce", nonce, + "softConfNonce", softConfNonce, ) newFeeCap = balanceFeeCap } @@ -279,12 +326,21 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, gasLimit uint64, lastFee func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) { p.mutex.Lock() defer p.mutex.Unlock() - err := p.updateBalance(ctx) + + expectedNonce, _, _, err := p.getNextNonceAndMaybeMeta(ctx) + if err != nil { + return nil, err + } + if nonce != expectedNonce { + return nil, fmt.Errorf("data poster expected next transaction to have nonce %v but was requested to post transaction with nonce %v", expectedNonce, nonce) + } + + err = p.updateBalance(ctx) if err != nil { return nil, fmt.Errorf("failed to update data poster balance: %w", err) } - feeCap, tipCap, err := p.feeAndTipCaps(ctx, gasLimit, nil, nil, dataCreatedAt, 0) + feeCap, tipCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, nil, nil, dataCreatedAt, 0) if err != nil { return nil, err } @@ -317,11 +373,14 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr if prevTx != nil && prevTx.Data.Nonce != newTx.Data.Nonce { return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) } - return p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx) + if err := p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx); err != nil { + return fmt.Errorf("putting new tx in the queue: %w", err) + } + return nil } func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { - if prevTx != newTx { + if prevTx == nil || (newTx.FullTx.Hash() != prevTx.FullTx.Hash()) { if err := p.saveTx(ctx, prevTx, newTx); err != nil { return err } @@ -342,7 +401,7 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti // The mutex must be held by the caller. func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) + newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Nonce, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) if err != nil { return err } @@ -386,7 +445,7 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa // The mutex must be held by the caller. 
func (p *DataPoster) updateNonce(ctx context.Context) error { var blockNumQuery *big.Int - if p.config().WaitForL1Finality { + if p.waitForL1Finality() { blockNumQuery = big.NewInt(int64(rpc.FinalizedBlockNumber)) } header, err := p.client.HeaderByNumber(ctx, blockNumQuery) @@ -459,7 +518,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap, "gas", tx.Data.Gas) } const minWait = time.Second * 10 @@ -499,7 +558,7 @@ func (p *DataPoster) Start(ctxIn context.Context) { // replacing them by fee. queueContents, err := p.queue.FetchContents(ctx, unconfirmedNonce, maxTxsToRbf) if err != nil { - log.Warn("Failed to get tx queue contents", "err", err) + log.Error("Failed to fetch tx queue contents", "err", err) return minWait } for index, tx := range queueContents { @@ -555,19 +614,23 @@ type QueueStorage interface { } type DataPosterConfig struct { - RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` - ReplacementTimes string `koanf:"replacement-times"` - WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` - MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` - MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` - TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` - UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` - MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` - MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` - UseLevelDB bool `koanf:"use-leveldb" reload:"hot"` - UseNoOpStorage bool `koanf:"use-noop-storage" reload:"hot"` - MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` - EnableLevelDB bool `koanf:"enable-leveldb" reload:"hot"` + RedisSigner signature.SimpleHmacConfig `koanf:"redis-signer"` + ReplacementTimes string `koanf:"replacement-times"` + // This is forcibly disabled if the parent chain is an Arbitrum chain, + // so you should probably use DataPoster's waitForL1Finality method instead of reading this field directly. 
+ WaitForL1Finality bool `koanf:"wait-for-l1-finality" reload:"hot"` + MaxMempoolTransactions uint64 `koanf:"max-mempool-transactions" reload:"hot"` + MaxQueuedTransactions int `koanf:"max-queued-transactions" reload:"hot"` + TargetPriceGwei float64 `koanf:"target-price-gwei" reload:"hot"` + UrgencyGwei float64 `koanf:"urgency-gwei" reload:"hot"` + MinFeeCapGwei float64 `koanf:"min-fee-cap-gwei" reload:"hot"` + MinTipCapGwei float64 `koanf:"min-tip-cap-gwei" reload:"hot"` + MaxTipCapGwei float64 `koanf:"max-tip-cap-gwei" reload:"hot"` + NonceRbfSoftConfs uint64 `koanf:"nonce-rbf-soft-confs" reload:"hot"` + AllocateMempoolBalance bool `koanf:"allocate-mempool-balance" reload:"hot"` + UseLevelDB bool `koanf:"use-leveldb"` + UseNoOpStorage bool `koanf:"use-noop-storage"` + LegacyStorageEncoding bool `koanf:"legacy-storage-encoding" reload:"hot"` } // ConfigFetcher function type is used instead of directly passing config so @@ -583,8 +646,12 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Float64(prefix+".urgency-gwei", DefaultDataPosterConfig.UrgencyGwei, "the urgency to use for maximum fee cap calculation") f.Float64(prefix+".min-fee-cap-gwei", DefaultDataPosterConfig.MinFeeCapGwei, "the minimum fee cap to post transactions at") f.Float64(prefix+".min-tip-cap-gwei", DefaultDataPosterConfig.MinTipCapGwei, "the minimum tip cap to post transactions at") + f.Float64(prefix+".max-tip-cap-gwei", DefaultDataPosterConfig.MaxTipCapGwei, "the maximum tip cap to post transactions at") + f.Uint64(prefix+".nonce-rbf-soft-confs", DefaultDataPosterConfig.NonceRbfSoftConfs, "the maximum probable reorg depth, used to determine when a transaction will no longer likely need replaced-by-fee") + f.Bool(prefix+".allocate-mempool-balance", DefaultDataPosterConfig.AllocateMempoolBalance, "if true, don't put transactions in the mempool that spend a total greater than the batch poster's balance") f.Bool(prefix+".use-leveldb", DefaultDataPosterConfig.UseLevelDB, "uses leveldb when enabled") - f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseLevelDB, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".use-noop-storage", DefaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") + f.Bool(prefix+".legacy-storage-encoding", DefaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") signature.SimpleHmacConfigAddOptions(prefix+".redis-signer", f) } @@ -593,24 +660,39 @@ var DefaultDataPosterConfig = DataPosterConfig{ WaitForL1Finality: true, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 64, + MaxMempoolTransactions: 10, MinTipCapGwei: 0.05, - UseLevelDB: false, - UseNoOpStorage: false, MaxTipCapGwei: 5, - EnableLevelDB: false, + NonceRbfSoftConfs: 1, + AllocateMempoolBalance: true, + UseLevelDB: true, + UseNoOpStorage: false, + LegacyStorageEncoding: true, } +var DefaultDataPosterConfigForValidator = func() DataPosterConfig { + config := DefaultDataPosterConfig + config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + return config +}() + var TestDataPosterConfig = DataPosterConfig{ ReplacementTimes: "1s,2s,5s,10s,20s,30s,1m,5m", RedisSigner: signature.TestSimpleHmacConfig, WaitForL1Finality: false, TargetPriceGwei: 60., UrgencyGwei: 2., - MaxMempoolTransactions: 64, + MaxMempoolTransactions: 10, MinTipCapGwei: 0.05, + MaxTipCapGwei: 5, + NonceRbfSoftConfs: 1, + AllocateMempoolBalance: true, UseLevelDB: false, UseNoOpStorage: false, - 
MaxTipCapGwei: 5, - EnableLevelDB: false, } + +var TestDataPosterConfigForValidator = func() DataPosterConfig { + config := TestDataPosterConfig + config.MaxMempoolTransactions = 1 // the validator cannot queue transactions + return config +}() diff --git a/arbnode/dataposter/leveldb/leveldb.go b/arbnode/dataposter/leveldb/leveldb.go index e41a8665a6..cfb34b04f7 100644 --- a/arbnode/dataposter/leveldb/leveldb.go +++ b/arbnode/dataposter/leveldb/leveldb.go @@ -12,14 +12,14 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/syndtr/goleveldb/leveldb" ) // Storage implements leveldb based storage for batch poster. type Storage struct { - db ethdb.Database + db ethdb.Database + encDec storage.EncoderDecoderF } var ( @@ -31,16 +31,8 @@ var ( countKey = []byte(".count_key") ) -func New(db ethdb.Database) *Storage { - return &Storage{db: db} -} - -func (s *Storage) decodeItem(data []byte) (*storage.QueuedTransaction, error) { - var item storage.QueuedTransaction - if err := rlp.DecodeBytes(data, &item); err != nil { - return nil, fmt.Errorf("decoding item: %w", err) - } - return &item, nil +func New(db ethdb.Database, enc storage.EncoderDecoderF) *Storage { + return &Storage{db: db, encDec: enc} } func idxToKey(idx uint64) []byte { @@ -55,7 +47,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu if !it.Next() { break } - item, err := s.decodeItem(it.Value()) + item, err := s.encDec().Decode(it.Value()) if err != nil { return nil, err } @@ -84,7 +76,7 @@ func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, er if err != nil { return nil, err } - return s.decodeItem(val) + return s.encDec().Decode(val) } func (s *Storage) Prune(ctx context.Context, until uint64) error { @@ -117,7 +109,7 @@ func (s *Storage) valueAt(_ context.Context, key []byte) ([]byte, error) { val, err := s.db.Get(key) if err != nil { if isErrNotFound(err) { - return rlp.EncodeToBytes((*storage.QueuedTransaction)(nil)) + return s.encDec().Encode((*storage.QueuedTransaction)(nil)) } return nil, err } @@ -130,14 +122,14 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return err } - prevEnc, err := rlp.EncodeToBytes(prev) + prevEnc, err := s.encDec().Encode(prev) if err != nil { return fmt.Errorf("encoding previous item: %w", err) } if !bytes.Equal(stored, prevEnc) { return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, stored, prevEnc) } - newEnc, err := rlp.EncodeToBytes(new) + newEnc, err := s.encDec().Encode(new) if err != nil { return fmt.Errorf("encoding new item: %w", err) } diff --git a/arbnode/dataposter/redis/redisstorage.go b/arbnode/dataposter/redis/redisstorage.go index e6fe666c69..f2393611b2 100644 --- a/arbnode/dataposter/redis/redisstorage.go +++ b/arbnode/dataposter/redis/redisstorage.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/rlp" "github.com/go-redis/redis/v8" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/util/signature" @@ -23,14 +22,15 @@ type Storage struct { client redis.UniversalClient signer *signature.SimpleHmac key string + encDec storage.EncoderDecoderF } -func NewStorage(client redis.UniversalClient, key string, signerConf *signature.SimpleHmacConfig) (*Storage, error) { +func NewStorage(client 
redis.UniversalClient, key string, signerConf *signature.SimpleHmacConfig, enc storage.EncoderDecoderF) (*Storage, error) { signer, err := signature.NewSimpleHmac(signerConf) if err != nil { return nil, err } - return &Storage{client, signer, key}, nil + return &Storage{client, signer, key, enc}, nil } func joinHmacMsg(msg []byte, sig []byte) ([]byte, error) { @@ -65,16 +65,15 @@ func (s *Storage) FetchContents(ctx context.Context, startingIndex uint64, maxRe } var items []*storage.QueuedTransaction for _, itemString := range itemStrings { - var item storage.QueuedTransaction data, err := s.peelVerifySignature([]byte(itemString)) if err != nil { return nil, err } - err = rlp.DecodeBytes(data, &item) + item, err := s.encDec().Decode(data) if err != nil { return nil, err } - items = append(items, &item) + items = append(items, item) } return items, nil } @@ -95,16 +94,15 @@ func (s *Storage) FetchLast(ctx context.Context) (*storage.QueuedTransaction, er } var ret *storage.QueuedTransaction if len(itemStrings) > 0 { - var item storage.QueuedTransaction data, err := s.peelVerifySignature([]byte(itemStrings[0])) if err != nil { return nil, err } - err = rlp.DecodeBytes(data, &item) + item, err := s.encDec().Decode(data) if err != nil { return nil, err } - ret = &item + ret = item } return ret, nil } @@ -116,6 +114,17 @@ func (s *Storage) Prune(ctx context.Context, until uint64) error { return nil } +// normalizeDecoding decodes data (regardless of what encoding it used), and +// encodes it according to current encoding for storage. +// As a result, encoded data is transformed to currently used encoding. +func (s *Storage) normalizeDecoding(data []byte) ([]byte, error) { + item, err := s.encDec().Decode(data) + if err != nil { + return nil, err + } + return s.encDec().Encode(item) +} + func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.QueuedTransaction) error { if new == nil { return fmt.Errorf("tried to insert nil item at index %v", index) @@ -144,21 +153,24 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return fmt.Errorf("failed to validate item already in redis at index%v: %w", index, err) } - prevItemEncoded, err := rlp.EncodeToBytes(prev) + verifiedItem, err = s.normalizeDecoding(verifiedItem) + if err != nil { + return fmt.Errorf("error normalizing encoding for verified item: %w", err) + } + prevItemEncoded, err := s.encDec().Encode(prev) if err != nil { return err } if !bytes.Equal(verifiedItem, prevItemEncoded) { return fmt.Errorf("%w: replacing different item than expected at index %v", storage.ErrStorageRace, index) } - err = pipe.ZRem(ctx, s.key, haveItems[0]).Err() - if err != nil { + if err := pipe.ZRem(ctx, s.key, haveItems[0]).Err(); err != nil { return err } } else { return fmt.Errorf("expected only one return value for Put but got %v", len(haveItems)) } - newItemEncoded, err := rlp.EncodeToBytes(*new) + newItemEncoded, err := s.encDec().Encode(new) if err != nil { return err } @@ -170,11 +182,10 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu if err != nil { return err } - err = pipe.ZAdd(ctx, s.key, &redis.Z{ + if err := pipe.ZAdd(ctx, s.key, &redis.Z{ Score: float64(index), Member: string(signedItem), - }).Err() - if err != nil { + }).Err(); err != nil { return err } _, err = pipe.Exec(ctx) diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index 6eda5ca9a3..04286df411 100644 --- 
a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -9,25 +9,17 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/rlp" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" ) type Storage struct { firstNonce uint64 queue [][]byte + encDec func() storage.EncoderDecoderInterface } -func NewStorage() *Storage { - return &Storage{} -} - -func (s *Storage) decodeItem(data []byte) (*storage.QueuedTransaction, error) { - var item storage.QueuedTransaction - if err := rlp.DecodeBytes(data, &item); err != nil { - return nil, fmt.Errorf("decoding item: %w", err) - } - return &item, nil +func NewStorage(encDec func() storage.EncoderDecoderInterface) *Storage { + return &Storage{encDec: encDec} } func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResults uint64) ([]*storage.QueuedTransaction, error) { @@ -43,7 +35,7 @@ func (s *Storage) FetchContents(_ context.Context, startingIndex uint64, maxResu } var res []*storage.QueuedTransaction for _, r := range txs { - item, err := s.decodeItem(r) + item, err := s.encDec().Decode(r) if err != nil { return nil, err } @@ -56,7 +48,7 @@ func (s *Storage) FetchLast(context.Context) (*storage.QueuedTransaction, error) if len(s.queue) == 0 { return nil, nil } - return s.decodeItem(s.queue[len(s.queue)-1]) + return s.encDec().Decode(s.queue[len(s.queue)-1]) } func (s *Storage) Prune(_ context.Context, until uint64) error { @@ -73,7 +65,7 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued if new == nil { return fmt.Errorf("tried to insert nil item at index %v", index) } - newEnc, err := rlp.EncodeToBytes(new) + newEnc, err := s.encDec().Encode(new) if err != nil { return fmt.Errorf("encoding new item: %w", err) } @@ -93,7 +85,7 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued if queueIdx > len(s.queue) { return fmt.Errorf("attempted to set out-of-bounds index %v in queue starting at %v of length %v", index, s.firstNonce, len(s.queue)) } - prevEnc, err := rlp.EncodeToBytes(prev) + prevEnc, err := s.encDec().Encode(prev) if err != nil { return fmt.Errorf("encoding previous item: %w", err) } diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index 174ab131ac..70637c48e0 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -2,25 +2,150 @@ package storage import ( "errors" + "fmt" "time" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/offchainlabs/nitro/arbutil" ) var ( ErrStorageRace = errors.New("storage race error") BlockValidatorPrefix string = "v" // the prefix for all block validator keys + StakerPrefix string = "S" // the prefix for all staker keys BatchPosterPrefix string = "b" // the prefix for all batch poster keys // TODO(anodar): move everything else from schema.go file to here once // execution split is complete. ) type QueuedTransaction struct { - FullTx *types.Transaction `rlp:"nil"` + FullTx *types.Transaction Data types.DynamicFeeTx Meta []byte Sent bool Created time.Time // may be earlier than the tx was given to the tx poster NextReplacement time.Time } + +// LegacyQueuedTransaction is used for backwards compatibility. +// Before https://github.com/OffchainLabs/nitro/pull/1773: the queuedTransaction +// looked like this and was rlp encoded directly. 
After the pr, we are store +// rlp encoding of Meta into queuedTransaction and rlp encoding it once more +// to store it. +type LegacyQueuedTransaction struct { + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta BatchPosterPosition + Sent bool + Created time.Time // may be earlier than the tx was given to the tx poster + NextReplacement time.Time +} + +// This is also for legacy reason. Since Batchposter is in arbnode package, +// we can't refer to BatchPosterPosition type there even if we export it (that +// would create cyclic dependency). +// We'll drop this struct in a few releases when we drop legacy encoding. +type BatchPosterPosition struct { + MessageCount arbutil.MessageIndex + DelayedMessageCount uint64 + NextSeqNum uint64 +} + +func DecodeLegacyQueuedTransaction(data []byte) (*LegacyQueuedTransaction, error) { + var val LegacyQueuedTransaction + if err := rlp.DecodeBytes(data, &val); err != nil { + return nil, fmt.Errorf("decoding legacy queued transaction: %w", err) + } + return &val, nil +} + +func LegacyToQueuedTransaction(legacyQT *LegacyQueuedTransaction) (*QueuedTransaction, error) { + meta, err := rlp.EncodeToBytes(legacyQT.Meta) + if err != nil { + return nil, fmt.Errorf("converting legacy to queued transaction: %w", err) + } + return &QueuedTransaction{ + FullTx: legacyQT.FullTx, + Data: legacyQT.Data, + Meta: meta, + Sent: legacyQT.Sent, + Created: legacyQT.Created, + NextReplacement: legacyQT.NextReplacement, + }, nil +} + +func QueuedTransactionToLegacy(qt *QueuedTransaction) (*LegacyQueuedTransaction, error) { + if qt == nil { + return nil, nil + } + var meta BatchPosterPosition + if qt.Meta != nil { + if err := rlp.DecodeBytes(qt.Meta, &meta); err != nil { + return nil, fmt.Errorf("converting queued transaction to legacy: %w", err) + } + } + return &LegacyQueuedTransaction{ + FullTx: qt.FullTx, + Data: qt.Data, + Meta: meta, + Sent: qt.Sent, + Created: qt.Created, + NextReplacement: qt.NextReplacement, + }, nil +} + +// Decode tries to decode QueuedTransaction, if that fails it tries to decode +// into legacy queued transaction and converts to queued +func decode(data []byte) (*QueuedTransaction, error) { + var item QueuedTransaction + if err := rlp.DecodeBytes(data, &item); err != nil { + log.Debug("Failed to decode QueuedTransaction, attempting to decide legacy queued transaction", "error", err) + val, err := DecodeLegacyQueuedTransaction(data) + if err != nil { + return nil, fmt.Errorf("decoding legacy item: %w", err) + } + log.Debug("Succeeded decoding QueuedTransaction with legacy encoder") + return LegacyToQueuedTransaction(val) + } + return &item, nil +} + +type EncoderDecoder struct{} + +func (e *EncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) { + return rlp.EncodeToBytes(qt) +} + +func (e *EncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) { + return decode(data) +} + +type LegacyEncoderDecoder struct{} + +func (e *LegacyEncoderDecoder) Encode(qt *QueuedTransaction) ([]byte, error) { + legacyQt, err := QueuedTransactionToLegacy(qt) + if err != nil { + return nil, fmt.Errorf("encoding legacy item: %w", err) + } + return rlp.EncodeToBytes(legacyQt) +} + +func (le *LegacyEncoderDecoder) Decode(data []byte) (*QueuedTransaction, error) { + return decode(data) +} + +// Typically interfaces belong to where they are being used, not at implementing +// site, but this is used in all storages (besides no-op) and all of them +// require all the functions for this interface. 
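As a usage sketch (not part of this diff; field values are arbitrary illustrations), the compatibility path above can be exercised directly: an item written with LegacyEncoderDecoder is still readable through the new EncoderDecoder, because decode falls back to the legacy format when plain RLP decoding fails.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
)

func main() {
	// FullTx must be non-nil now that the rlp:"nil" tag has been dropped.
	item := &storage.QueuedTransaction{
		FullTx: types.NewTransaction(7, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil),
		Data:   types.DynamicFeeTx{ChainID: big.NewInt(1), Nonce: 7, Gas: 21000},
		Meta:   nil,
	}
	legacyEnc, err := (&storage.LegacyEncoderDecoder{}).Encode(item)
	if err != nil {
		panic(err)
	}
	// The current decoder first tries the new format, then falls back to the
	// legacy one and converts it, so existing queue entries keep working.
	decoded, err := (&storage.EncoderDecoder{}).Decode(legacyEnc)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded nonce:", decoded.Data.Nonce)
}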
+type EncoderDecoderInterface interface { + Encode(*QueuedTransaction) ([]byte, error) + Decode([]byte) (*QueuedTransaction, error) +} + +// EncoderDecoderF is a function type that returns encoder/decoder interface. +// This is needed to implement hot-reloading flag to switch encoding/decoding +// strategy on the fly. +type EncoderDecoderF func() EncoderDecoderInterface diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index 2424ac0845..d536e5da05 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -6,8 +6,10 @@ import ( "path" "testing" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/offchainlabs/nitro/arbnode/dataposter/leveldb" @@ -21,42 +23,55 @@ import ( var ignoreData = cmp.Options{ cmpopts.IgnoreUnexported( + types.Transaction{}, types.DynamicFeeTx{}, big.Int{}, ), cmpopts.IgnoreFields(types.Transaction{}, "hash", "size", "from"), } -func newLevelDBStorage(t *testing.T) *leveldb.Storage { +func newLevelDBStorage(t *testing.T, encF storage.EncoderDecoderF) *leveldb.Storage { t.Helper() db, err := rawdb.NewLevelDBDatabase(path.Join(t.TempDir(), "level.db"), 0, 0, "default", false) if err != nil { t.Fatalf("NewLevelDBDatabase() unexpected error: %v", err) } - return leveldb.New(db) + return leveldb.New(db, encF) } -func newSliceStorage() *slice.Storage { - return slice.NewStorage() +func newSliceStorage(encF storage.EncoderDecoderF) *slice.Storage { + return slice.NewStorage(encF) } -func newRedisStorage(ctx context.Context, t *testing.T) *redis.Storage { +func newRedisStorage(ctx context.Context, t *testing.T, encF storage.EncoderDecoderF) *redis.Storage { t.Helper() redisUrl := redisutil.CreateTestRedis(ctx, t) client, err := redisutil.RedisClientFromURL(redisUrl) if err != nil { t.Fatalf("RedisClientFromURL(%q) unexpected error: %v", redisUrl, err) } - s, err := redis.NewStorage(client, "", &signature.TestSimpleHmacConfig) + s, err := redis.NewStorage(client, "", &signature.TestSimpleHmacConfig, encF) if err != nil { t.Fatalf("redis.NewStorage() unexpected error: %v", err) } return s } -func valueOf(i int) *storage.QueuedTransaction { +func valueOf(t *testing.T, i int) *storage.QueuedTransaction { + t.Helper() + meta, err := rlp.EncodeToBytes(storage.BatchPosterPosition{DelayedMessageCount: uint64(i)}) + if err != nil { + t.Fatalf("Encoding batch poster position, error: %v", err) + } return &storage.QueuedTransaction{ - Meta: []byte{byte(i)}, + FullTx: types.NewTransaction( + uint64(i), + common.Address{}, + big.NewInt(int64(i)), + uint64(i), + big.NewInt(int64(i)), + []byte{byte(i)}), + Meta: meta, Data: types.DynamicFeeTx{ ChainID: big.NewInt(int64(i)), Nonce: uint64(i), @@ -73,10 +88,10 @@ func valueOf(i int) *storage.QueuedTransaction { } } -func values(from, to int) []*storage.QueuedTransaction { +func values(t *testing.T, from, to int) []*storage.QueuedTransaction { var res []*storage.QueuedTransaction for i := from; i <= to; i++ { - res = append(res, valueOf(i)) + res = append(res, valueOf(t, i)) } return res } @@ -85,7 +100,7 @@ func values(from, to int) []*storage.QueuedTransaction { func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage { t.Helper() for i := 0; i < 20; i++ { - if err := s.Put(ctx, uint64(i), nil, valueOf(i)); err != nil { + if err := s.Put(ctx, 
uint64(i), nil, valueOf(t, i)); err != nil { t.Fatalf("Error putting a key/value: %v", err) } } @@ -95,10 +110,18 @@ func initStorage(ctx context.Context, t *testing.T, s QueueStorage) QueueStorage // Returns a map of all empty storages. func storages(t *testing.T) map[string]QueueStorage { t.Helper() + f := func(enc storage.EncoderDecoderInterface) storage.EncoderDecoderF { + return func() storage.EncoderDecoderInterface { + return enc + } + } return map[string]QueueStorage{ - "levelDB": newLevelDBStorage(t), - "slice": newSliceStorage(), - "redis": newRedisStorage(context.Background(), t), + "levelDBLegacy": newLevelDBStorage(t, f(&storage.LegacyEncoderDecoder{})), + "sliceLegacy": newSliceStorage(f(&storage.LegacyEncoderDecoder{})), + "redisLegacy": newRedisStorage(context.Background(), t, f(&storage.LegacyEncoderDecoder{})), + "levelDB": newLevelDBStorage(t, f(&storage.EncoderDecoder{})), + "slice": newSliceStorage(f(&storage.EncoderDecoder{})), + "redis": newRedisStorage(context.Background(), t, f(&storage.EncoderDecoder{})), } } @@ -125,13 +148,13 @@ func TestFetchContents(t *testing.T) { desc: "sequence with single digits", startIdx: 5, maxResults: 3, - want: values(5, 7), + want: values(t, 5, 7), }, { desc: "corner case of single element", startIdx: 0, maxResults: 1, - want: values(0, 0), + want: values(t, 0, 0), }, { desc: "no elements", @@ -143,13 +166,13 @@ func TestFetchContents(t *testing.T) { desc: "sequence with variable number of digits", startIdx: 9, maxResults: 3, - want: values(9, 11), + want: values(t, 9, 11), }, { desc: "max results goes over the last element", startIdx: 13, maxResults: 10, - want: values(13, 19), + want: values(t, 13, 19), }, } { t.Run(name+"_"+tc.desc, func(t *testing.T) { @@ -171,7 +194,7 @@ func TestLast(t *testing.T) { t.Run(name, func(t *testing.T) { ctx := context.Background() for i := 0; i < cnt; i++ { - val := valueOf(i) + val := valueOf(t, i) if err := s.Put(ctx, uint64(i), nil, val); err != nil { t.Fatalf("Error putting a key/value: %v", err) } @@ -185,12 +208,12 @@ func TestLast(t *testing.T) { } }) - last := valueOf(cnt - 1) + last := valueOf(t, cnt-1) t.Run(name+"_update_entries", func(t *testing.T) { ctx := context.Background() for i := 0; i < cnt-1; i++ { - prev := valueOf(i) - newVal := valueOf(cnt + i) + prev := valueOf(t, i) + newVal := valueOf(t, cnt+i) if err := s.Put(ctx, uint64(i), prev, newVal); err != nil { t.Fatalf("Error putting a key/value: %v, prev: %v, new: %v", err, prev, newVal) } @@ -227,17 +250,17 @@ func TestPrune(t *testing.T) { { desc: "prune all but one", pruneFrom: 19, - want: values(19, 19), + want: values(t, 19, 19), }, { desc: "pruning first element", pruneFrom: 1, - want: values(1, 19), + want: values(t, 1, 19), }, { desc: "pruning first 11 elements", pruneFrom: 11, - want: values(11, 19), + want: values(t, 11, 19), }, { desc: "pruning from higher than biggest index", diff --git a/arbnode/delayed_sequencer.go b/arbnode/delayed_sequencer.go index f45a85ac49..aa6d43785e 100644 --- a/arbnode/delayed_sequencer.go +++ b/arbnode/delayed_sequencer.go @@ -52,14 +52,14 @@ func DelayedSequencerConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultDelayedSequencerConfig = DelayedSequencerConfig{ Enable: false, FinalizeDistance: 20, - RequireFullFinality: true, + RequireFullFinality: false, UseMergeFinality: true, } var TestDelayedSequencerConfig = DelayedSequencerConfig{ Enable: true, FinalizeDistance: 20, - RequireFullFinality: true, + RequireFullFinality: false, UseMergeFinality: true, } diff --git 
a/arbnode/execution/blockchain.go b/arbnode/execution/blockchain.go index 88e7044e8d..0ce76d8ccd 100644 --- a/arbnode/execution/blockchain.go +++ b/arbnode/execution/blockchain.go @@ -26,15 +26,15 @@ import ( ) type CachingConfig struct { - Archive bool `koanf:"archive"` - BlockCount uint64 `koanf:"block-count"` - BlockAge time.Duration `koanf:"block-age"` - TrieTimeLimit time.Duration `koanf:"trie-time-limit"` - TrieDirtyCache int `koanf:"trie-dirty-cache"` - TrieCleanCache int `koanf:"trie-clean-cache"` - SnapshotCache int `koanf:"snapshot-cache"` - DatabaseCache int `koanf:"database-cache"` - SnapshotRestoreMaxGas uint64 `koanf:"snapshot-restore-gas-limit"` + Archive bool `koanf:"archive"` + BlockCount uint64 `koanf:"block-count"` + BlockAge time.Duration `koanf:"block-age"` + TrieTimeLimit time.Duration `koanf:"trie-time-limit"` + TrieDirtyCache int `koanf:"trie-dirty-cache"` + TrieCleanCache int `koanf:"trie-clean-cache"` + SnapshotCache int `koanf:"snapshot-cache"` + DatabaseCache int `koanf:"database-cache"` + SnapshotRestoreGasLimit uint64 `koanf:"snapshot-restore-gas-limit"` } func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -46,19 +46,19 @@ func CachingConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".trie-clean-cache", DefaultCachingConfig.TrieCleanCache, "amount of memory in megabytes to cache unchanged state trie nodes with") f.Int(prefix+".snapshot-cache", DefaultCachingConfig.SnapshotCache, "amount of memory in megabytes to cache state snapshots with") f.Int(prefix+".database-cache", DefaultCachingConfig.DatabaseCache, "amount of memory in megabytes to cache database contents with") - f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreMaxGas, "maximum gas rolled back to recover snapshot") + f.Uint64(prefix+".snapshot-restore-gas-limit", DefaultCachingConfig.SnapshotRestoreGasLimit, "maximum gas rolled back to recover snapshot") } var DefaultCachingConfig = CachingConfig{ - Archive: false, - BlockCount: 128, - BlockAge: 30 * time.Minute, - TrieTimeLimit: time.Hour, - TrieDirtyCache: 1024, - TrieCleanCache: 600, - SnapshotCache: 400, - DatabaseCache: 2048, - SnapshotRestoreMaxGas: 300_000_000_000, + Archive: false, + BlockCount: 128, + BlockAge: 30 * time.Minute, + TrieTimeLimit: time.Hour, + TrieDirtyCache: 1024, + TrieCleanCache: 600, + SnapshotCache: 400, + DatabaseCache: 2048, + SnapshotRestoreGasLimit: 300_000_000_000, } func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core.CacheConfig { @@ -79,7 +79,7 @@ func DefaultCacheConfigFor(stack *node.Node, cachingConfig *CachingConfig) *core TrieRetention: cachingConfig.BlockAge, SnapshotLimit: cachingConfig.SnapshotCache, Preimages: baseConf.Preimages, - SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreMaxGas, + SnapshotRestoreMaxGas: cachingConfig.SnapshotRestoreGasLimit, } } diff --git a/arbnode/execution/executionengine.go b/arbnode/execution/executionengine.go index d8029650d7..da01e27983 100644 --- a/arbnode/execution/executionengine.go +++ b/arbnode/execution/executionengine.go @@ -599,7 +599,7 @@ func (s *ExecutionEngine) Start(ctx_in context.Context) { s.latestBlockMutex.Lock() block := s.latestBlock s.latestBlockMutex.Unlock() - if block != lastBlock && block != nil { + if block != nil && (lastBlock == nil || block.Hash() != lastBlock.Hash()) { log.Info( "created block", "l2Block", block.Number(), diff --git a/arbnode/execution/sequencer.go b/arbnode/execution/sequencer.go index ea818beb6b..927ce7ac08 100644 --- 
a/arbnode/execution/sequencer.go +++ b/arbnode/execution/sequencer.go @@ -110,6 +110,7 @@ var DefaultSequencerConfig = SequencerConfig{ NonceCacheSize: 1024, Dangerous: DefaultDangerousSequencerConfig, // 95% of the default batch poster limit, leaving 5KB for headers and such + // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxTxDataSize: 95000, NonceFailureCacheSize: 1024, NonceFailureCacheExpiry: time.Second, @@ -182,9 +183,9 @@ func newNonceCache(size int) *nonceCache { func (c *nonceCache) matches(header *types.Header) bool { if c.dirty != nil { - // The header is updated as the block is built, - // so instead of checking its hash, we do a pointer comparison. - return c.dirty == header + // Note, even though the of the header changes, c.dirty points to the + // same header, hence hashes will be the same and this check will pass. + return headerreader.HeadersEqual(c.dirty, header) } return c.block == header.ParentHash } @@ -655,7 +656,7 @@ func (s *Sequencer) precheckNonces(queueItems []txQueueItem) []txQueueItem { return queueItems } nextHeaderNumber := arbmath.BigAdd(latestHeader.Number, common.Big1) - signer := types.MakeSigner(bc.Config(), nextHeaderNumber) + signer := types.MakeSigner(bc.Config(), nextHeaderNumber, latestHeader.Time) outputQueueItems := make([]txQueueItem, 0, len(queueItems)) var nextQueueItem *txQueueItem var queueItemsIdx int diff --git a/arbnode/execution/tx_pre_checker.go b/arbnode/execution/tx_pre_checker.go index 01cef6d7a4..968a1f266b 100644 --- a/arbnode/execution/tx_pre_checker.go +++ b/arbnode/execution/tx_pre_checker.go @@ -18,6 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" flag "github.com/spf13/pflag" ) @@ -115,7 +116,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if tx.Gas() < params.TxGas { return core.ErrIntrinsicGas } - sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number), tx) + sender, err := types.Sender(types.MakeSigner(chainConfig, header.Number, header.Time), tx) if err != nil { return err } @@ -134,7 +135,7 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty return MakeNonceError(sender, tx.Nonce(), stateNonce) } extraInfo := types.DeserializeHeaderExtraInformation(header) - intrinsic, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, chainConfig.IsHomestead(header.Number), chainConfig.IsIstanbul(header.Number), chainConfig.IsShanghai(header.Time, extraInfo.ArbOSFormatVersion)) + intrinsic, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, chainConfig.IsHomestead(header.Number), chainConfig.IsIstanbul(header.Number), chainConfig.IsShanghai(header.Number, header.Time, extraInfo.ArbOSFormatVersion)) if err != nil { return err } @@ -144,11 +145,6 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty if config.Strictness < TxPreCheckerStrictnessLikelyCompatible { return nil } - balance := statedb.GetBalance(sender) - cost := tx.Cost() - if arbmath.BigLessThan(balance, cost) { - return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost) - } if options != nil { if err := options.Check(extraInfo.L1BlockNumber, header.Time, statedb); err != nil { conditionalTxRejectedByTxPreCheckerCurrentStateCounter.Inc(1) @@ -170,7 +166,7 @@ func PreCheckTx(bc 
*core.BlockChain, chainConfig *params.ChainConfig, header *ty oldHeader = previousHeader blocksTraversed++ } - if oldHeader != header { + if !headerreader.HeadersEqual(oldHeader, header) { secondOldStatedb, err := bc.StateAt(oldHeader.Root) if err != nil { return fmt.Errorf("failed to get old state: %w", err) @@ -184,6 +180,11 @@ func PreCheckTx(bc *core.BlockChain, chainConfig *params.ChainConfig, header *ty conditionalTxAcceptedByTxPreCheckerOldStateCounter.Inc(1) } } + balance := statedb.GetBalance(sender) + cost := tx.Cost() + if arbmath.BigLessThan(balance, cost) { + return fmt.Errorf("%w: address %v have %v want %v", core.ErrInsufficientFunds, sender, balance, cost) + } if config.Strictness >= TxPreCheckerStrictnessFullValidation && tx.Nonce() > stateNonce { return MakeNonceError(sender, tx.Nonce(), stateNonce) } diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index b469ecdbef..31bf1a63ff 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -33,22 +33,23 @@ type MessagePruner struct { } type MessagePrunerConfig struct { - Enable bool `koanf:"enable"` - MessagePruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` - MinBatchesLeft uint64 `koanf:"min-batches-left" reload:"hot"` + Enable bool `koanf:"enable"` + // Message pruning interval. + PruneInterval time.Duration `koanf:"prune-interval" reload:"hot"` + MinBatchesLeft uint64 `koanf:"min-batches-left" reload:"hot"` } type MessagePrunerConfigFetcher func() *MessagePrunerConfig var DefaultMessagePrunerConfig = MessagePrunerConfig{ - Enable: true, - MessagePruneInterval: time.Minute, - MinBatchesLeft: 2, + Enable: true, + PruneInterval: time.Minute, + MinBatchesLeft: 2, } func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultMessagePrunerConfig.Enable, "enable message pruning") - f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.MessagePruneInterval, "interval for running message pruner") + f.Duration(prefix+".prune-interval", DefaultMessagePrunerConfig.PruneInterval, "interval for running message pruner") f.Uint64(prefix+".min-batches-left", DefaultMessagePrunerConfig.MinBatchesLeft, "min number of batches not pruned") } @@ -70,7 +71,7 @@ func (m *MessagePruner) UpdateLatestConfirmed(count arbutil.MessageIndex, global return } - if m.lastPruneDone.Add(m.config().MessagePruneInterval).After(time.Now()) { + if m.lastPruneDone.Add(m.config().PruneInterval).After(time.Now()) { m.pruningLock.Unlock() return } diff --git a/arbnode/node.go b/arbnode/node.go index 2f5e4a69b2..5bdc716264 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -40,6 +40,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/ospgen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/contracts" @@ -234,19 +235,12 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com } } -func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, readerConfig headerreader.ConfigFetcher, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { - l1Reader, err := headerreader.New(ctx, l1client, readerConfig) - if err != nil { - return nil, err - } - l1Reader.Start(ctx) - defer l1Reader.StopAndWait() - +func 
DeployOnL1(ctx context.Context, parentChainReader *headerreader.HeaderReader, deployAuth *bind.TransactOpts, batchPoster common.Address, authorizeValidators uint64, config rollupgen.Config) (*chaininfo.RollupAddresses, error) { if config.WasmModuleRoot == (common.Hash{}) { return nil, errors.New("no machine specified") } - rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, l1Reader, deployAuth) + rollupCreator, _, validatorUtils, validatorWalletCreator, err := deployRollupCreator(ctx, parentChainReader, deployAuth) if err != nil { return nil, fmt.Errorf("error deploying rollup creator: %w", err) } @@ -265,7 +259,7 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b if err != nil { return nil, fmt.Errorf("error submitting create rollup tx: %w", err) } - receipt, err := l1Reader.WaitForTxApproval(ctx, tx) + receipt, err := parentChainReader.WaitForTxApproval(ctx, tx) if err != nil { return nil, fmt.Errorf("error executing create rollup tx: %w", err) } @@ -286,34 +280,34 @@ func DeployOnL1(ctx context.Context, l1client arbutil.L1Interface, deployAuth *b } type Config struct { - RPC arbitrum.Config `koanf:"rpc"` - Sequencer execution.SequencerConfig `koanf:"sequencer" reload:"hot"` - L1Reader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` - InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` - DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` - BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` - MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` - ForwardingTargetImpl string `koanf:"forwarding-target"` - Forwarder execution.ForwarderConfig `koanf:"forwarder"` - TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` - BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` - RecordingDB arbitrum.RecordingDatabaseConfig `koanf:"recording-database"` - Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` - Staker staker.L1ValidatorConfig `koanf:"staker"` - SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` - DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` - Dangerous DangerousConfig `koanf:"dangerous"` - Caching execution.CachingConfig `koanf:"caching"` - Archive bool `koanf:"archive"` - TxLookupLimit uint64 `koanf:"tx-lookup-limit"` - TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` - Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` - ResourceManagement resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + RPC arbitrum.Config `koanf:"rpc"` + Sequencer execution.SequencerConfig `koanf:"sequencer" reload:"hot"` + ParentChainReader headerreader.Config `koanf:"parent-chain-reader" reload:"hot"` + InboxReader InboxReaderConfig `koanf:"inbox-reader" reload:"hot"` + DelayedSequencer DelayedSequencerConfig `koanf:"delayed-sequencer" reload:"hot"` + BatchPoster BatchPosterConfig `koanf:"batch-poster" reload:"hot"` + MessagePruner MessagePrunerConfig `koanf:"message-pruner" reload:"hot"` + ForwardingTarget string `koanf:"forwarding-target"` + Forwarder execution.ForwarderConfig `koanf:"forwarder"` + TxPreChecker execution.TxPreCheckerConfig `koanf:"tx-pre-checker" reload:"hot"` + BlockValidator staker.BlockValidatorConfig `koanf:"block-validator" reload:"hot"` + RecordingDatabase arbitrum.RecordingDatabaseConfig 
`koanf:"recording-database"` + Feed broadcastclient.FeedConfig `koanf:"feed" reload:"hot"` + Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` + SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` + Dangerous DangerousConfig `koanf:"dangerous"` + Caching execution.CachingConfig `koanf:"caching"` + Archive bool `koanf:"archive"` + TxLookupLimit uint64 `koanf:"tx-lookup-limit"` + TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` + Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` + ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` } func (c *Config) Validate() error { - if c.L1Reader.Enable && c.Sequencer.Enable && !c.DelayedSequencer.Enable { + if c.ParentChainReader.Enable && c.Sequencer.Enable && !c.DelayedSequencer.Enable { log.Warn("delayed sequencer is not enabled, despite sequencer and l1 reader being enabled") } if c.DelayedSequencer.Enable && !c.Sequencer.Enable { @@ -343,12 +337,12 @@ func (c *Config) Validate() error { return nil } -func (c *Config) ForwardingTarget() string { - if c.ForwardingTargetImpl == "null" { +func (c *Config) ForwardingTargetF() string { + if c.ForwardingTarget == "null" { return "" } - return c.ForwardingTargetImpl + return c.ForwardingTarget } func (c *Config) ValidatorRequired() bool { @@ -369,7 +363,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed DelayedSequencerConfigAddOptions(prefix+".delayed-sequencer", f) BatchPosterConfigAddOptions(prefix+".batch-poster", f) MessagePrunerConfigAddOptions(prefix+".message-pruner", f) - f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTargetImpl, "transaction forwarding target URL, or \"null\" to disable forwarding (iff not sequencer)") + f.String(prefix+".forwarding-target", ConfigDefault.ForwardingTarget, "transaction forwarding target URL, or \"null\" to disable forwarding (if not sequencer)") execution.AddOptionsForNodeForwarderConfig(prefix+".forwarder", f) execution.TxPreCheckerConfigAddOptions(prefix+".tx-pre-checker", f) staker.BlockValidatorConfigAddOptions(prefix+".block-validator", f) @@ -391,28 +385,28 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed } var ConfigDefault = Config{ - RPC: arbitrum.DefaultConfig, - Sequencer: execution.DefaultSequencerConfig, - L1Reader: headerreader.DefaultConfig, - InboxReader: DefaultInboxReaderConfig, - DelayedSequencer: DefaultDelayedSequencerConfig, - BatchPoster: DefaultBatchPosterConfig, - MessagePruner: DefaultMessagePrunerConfig, - ForwardingTargetImpl: "", - TxPreChecker: execution.DefaultTxPreCheckerConfig, - BlockValidator: staker.DefaultBlockValidatorConfig, - RecordingDB: arbitrum.DefaultRecordingDatabaseConfig, - Feed: broadcastclient.FeedConfigDefault, - Staker: staker.DefaultL1ValidatorConfig, - SeqCoordinator: DefaultSeqCoordinatorConfig, - DataAvailability: das.DefaultDataAvailabilityConfig, - SyncMonitor: DefaultSyncMonitorConfig, - Dangerous: DefaultDangerousConfig, - Archive: false, - TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second - Caching: execution.DefaultCachingConfig, - TransactionStreamer: DefaultTransactionStreamerConfig, - ResourceManagement: resourcemanager.DefaultConfig, + RPC: arbitrum.DefaultConfig, + Sequencer: execution.DefaultSequencerConfig, + ParentChainReader: headerreader.DefaultConfig, + InboxReader: 
DefaultInboxReaderConfig, + DelayedSequencer: DefaultDelayedSequencerConfig, + BatchPoster: DefaultBatchPosterConfig, + MessagePruner: DefaultMessagePrunerConfig, + ForwardingTarget: "", + TxPreChecker: execution.DefaultTxPreCheckerConfig, + BlockValidator: staker.DefaultBlockValidatorConfig, + RecordingDatabase: arbitrum.DefaultRecordingDatabaseConfig, + Feed: broadcastclient.FeedConfigDefault, + Staker: staker.DefaultL1ValidatorConfig, + SeqCoordinator: DefaultSeqCoordinatorConfig, + DataAvailability: das.DefaultDataAvailabilityConfig, + SyncMonitor: DefaultSyncMonitorConfig, + Dangerous: DefaultDangerousConfig, + Archive: false, + TxLookupLimit: 126_230_400, // 1 year at 4 blocks per second + Caching: execution.DefaultCachingConfig, + TransactionStreamer: DefaultTransactionStreamerConfig, + ResourceMgmt: resourcemanager.DefaultConfig, } func ConfigDefaultL1Test() *Config { @@ -427,13 +421,14 @@ func ConfigDefaultL1Test() *Config { func ConfigDefaultL1NonSequencerTest() *Config { config := ConfigDefault - config.L1Reader = headerreader.TestConfig + config.ParentChainReader = headerreader.TestConfig config.InboxReader = TestInboxReaderConfig config.Sequencer.Enable = false config.DelayedSequencer.Enable = false config.BatchPoster.Enable = false config.SeqCoordinator.Enable = false config.BlockValidator = staker.TestBlockValidatorConfig + config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" config.Forwarder = execution.DefaultTestForwarderConfig @@ -445,12 +440,13 @@ func ConfigDefaultL1NonSequencerTest() *Config { func ConfigDefaultL2Test() *Config { config := ConfigDefault config.Sequencer = execution.TestSequencerConfig - config.L1Reader.Enable = false + config.ParentChainReader.Enable = false config.SeqCoordinator = TestSeqCoordinatorConfig - config.Feed.Input.Verifier.Dangerous.AcceptMissing = true + config.Feed.Input.Verify.Dangerous.AcceptMissing = true config.Feed.Output.Signed = false - config.SeqCoordinator.Signing.ECDSA.AcceptSequencer = false - config.SeqCoordinator.Signing.ECDSA.Dangerous.AcceptMissing = true + config.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + config.SeqCoordinator.Signer.ECDSA.Dangerous.AcceptMissing = true + config.Staker = staker.TestL1ValidatorConfig config.Staker.Enable = false config.BlockValidator.ValidationServer.URL = "" config.TransactionStreamer = DefaultTransactionStreamerConfig @@ -543,25 +539,30 @@ func checkArbDbSchemaVersion(arbDb ethdb.Database) error { return nil } -func ValidatorDataposter(db ethdb.Database, l1Reader *headerreader.HeaderReader, - transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor) (*dataposter.DataPoster, error) { +func StakerDataposter( + db ethdb.Database, l1Reader *headerreader.HeaderReader, + transactOpts *bind.TransactOpts, cfgFetcher ConfigFetcher, syncMonitor *SyncMonitor, +) (*dataposter.DataPoster, error) { + if transactOpts == nil { + return nil, nil + } cfg := cfgFetcher.Get() mdRetriever := func(ctx context.Context, blockNum *big.Int) ([]byte, error) { return nil, nil } - redisC, err := redisutil.RedisClientFromURL(cfg.BlockValidator.RedisUrl) + redisC, err := redisutil.RedisClientFromURL(cfg.Staker.RedisUrl) if err != nil { return nil, fmt.Errorf("creating redis client from url: %w", err) } lockCfgFetcher := func() *redislock.SimpleCfg { - return &cfg.BlockValidator.RedisLock + return &cfg.Staker.RedisLock } redisLock, err := redislock.NewSimple(redisC, lockCfgFetcher, func() bool { return 
syncMonitor.Synced() }) if err != nil { return nil, err } dpCfg := func() *dataposter.DataPosterConfig { - return &cfg.BlockValidator.DataPoster + return &cfg.Staker.DataPoster } return dataposter.NewDataPoster(db, l1Reader, transactOpts, redisC, redisLock, dpCfg, mdRetriever) } @@ -603,8 +604,9 @@ func createNodeImpl( } var l1Reader *headerreader.HeaderReader - if config.L1Reader.Enable { - l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().L1Reader }) + if config.ParentChainReader.Enable { + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err = headerreader.New(ctx, l1client, func() *headerreader.Config { return &configFetcher.Get().ParentChainReader }, arbSys) if err != nil { return nil, err } @@ -613,7 +615,7 @@ func createNodeImpl( sequencerConfigFetcher := func() *execution.SequencerConfig { return &configFetcher.Get().Sequencer } txprecheckConfigFetcher := func() *execution.TxPreCheckerConfig { return &configFetcher.Get().TxPreChecker } exec, err := execution.CreateExecutionNode(stack, chainDb, l2BlockChain, l1Reader, syncMonitor, - config.ForwardingTarget(), &config.Forwarder, config.RPC, &config.RecordingDB, + config.ForwardingTargetF(), &config.Forwarder, config.RPC, &config.RecordingDatabase, sequencerConfigFetcher, txprecheckConfigFetcher) if err != nil { return nil, err @@ -683,7 +685,7 @@ func createNodeImpl( } } - if !config.L1Reader.Enable { + if !config.ParentChainReader.Enable { return &Node{ ArbDB: arbDb, Stack: stack, @@ -802,8 +804,8 @@ func createNodeImpl( var messagePruner *MessagePruner if config.Staker.Enable { - dp, err := ValidatorDataposter( - rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), + dp, err := StakerDataposter( + rawdb.NewTable(arbDb, storage.StakerPrefix), l1Reader, txOptsValidator, configFetcher, @@ -812,6 +814,7 @@ func createNodeImpl( if err != nil { return nil, err } + getExtraGas := func() uint64 { return configFetcher.Get().Staker.ExtraGas } var wallet staker.ValidatorWalletInterface if config.Staker.UseSmartContractWallet || txOptsValidator == nil { var existingWalletAddress *common.Address @@ -823,7 +826,7 @@ func createNodeImpl( tmpAddress := common.HexToAddress(config.Staker.ContractWalletAddress) existingWalletAddress = &tmpAddress } - wallet, err = staker.NewContractValidatorWallet(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, config.BlockValidator.ExtraGas) + wallet, err = staker.NewContractValidatorWallet(dp, existingWalletAddress, deployInfo.ValidatorWalletCreator, deployInfo.Rollup, l1Reader, txOptsValidator, int64(deployInfo.DeployedAt), func(common.Address) {}, getExtraGas) if err != nil { return nil, err } @@ -831,7 +834,7 @@ func createNodeImpl( if len(config.Staker.ContractWalletAddress) > 0 { return nil, errors.New("validator contract wallet specified but flag to use a smart contract wallet was not specified") } - wallet, err = staker.NewEoaValidatorWallet(dp, deployInfo.Rollup, l1client, txOptsValidator) + wallet, err = staker.NewEoaValidatorWallet(dp, deployInfo.Rollup, l1client, txOptsValidator, getExtraGas) if err != nil { return nil, err } diff --git a/arbnode/resourcemanager/resource_management.go b/arbnode/resourcemanager/resource_management.go index acb5355987..88b7d65094 100644 --- a/arbnode/resourcemanager/resource_management.go +++ b/arbnode/resourcemanager/resource_management.go @@ -23,6 +23,7 @@ var ( 
limitCheckDurationHistogram = metrics.NewRegisteredHistogram("arb/rpc/limitcheck/duration", nil, metrics.NewBoundedHistogramSample()) limitCheckSuccessCounter = metrics.NewRegisteredCounter("arb/rpc/limitcheck/success", nil) limitCheckFailureCounter = metrics.NewRegisteredCounter("arb/rpc/limitcheck/failure", nil) + errNotSupported = errors.New("not supported") ) // Init adds the resource manager's httpServer to a custom hook in geth. @@ -31,9 +32,16 @@ var ( // // Must be run before the go-ethereum stack is set up (ethereum/go-ethereum/node.New). func Init(conf *Config) { - if conf.MemoryLimitPercent > 0 { + if conf.MemLimitPercent > 0 { node.WrapHTTPHandler = func(srv http.Handler) (http.Handler, error) { - return newHttpServer(srv, newLimitChecker(conf)), nil + var c limitChecker + c, err := newCgroupsMemoryLimitCheckerIfSupported(conf) + if errors.Is(err, errNotSupported) { + log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") + c = &trivialLimitChecker{} + } + + return newHttpServer(srv, c), nil } } } @@ -42,18 +50,18 @@ func Init(conf *Config) { // Currently only a memory limit is supported, other limits may be added // in the future. type Config struct { - MemoryLimitPercent int `koanf:"mem-limit-percent" reload:"hot"` + MemLimitPercent int `koanf:"mem-limit-percent" reload:"hot"` } // DefaultConfig has the defaul resourcemanager configuration, // all limits are disabled. var DefaultConfig = Config{ - MemoryLimitPercent: 0, + MemLimitPercent: 0, } // ConfigAddOptions adds the configuration options for resourcemanager. func ConfigAddOptions(prefix string, f *pflag.FlagSet) { - f.Int(prefix+".mem-limit-percent", DefaultConfig.MemoryLimitPercent, "RPC calls are throttled if system memory utilization exceeds this percent value, zero (default) is disabled") + f.Int(prefix+".mem-limit-percent", DefaultConfig.MemLimitPercent, "RPC calls are throttled if system memory utilization exceeds this percent value, zero (default) is disabled") } // httpServer implements http.Handler and wraps calls to inner with a resource @@ -90,20 +98,27 @@ type limitChecker interface { String() string } -// newLimitChecker attempts to auto-discover the mechanism by which it -// can check system limits. Currently Cgroups V1 is supported, -// with Cgroups V2 likely to be implmemented next. If no supported -// mechanism is discovered, it logs an error and fails open, ie -// it creates a trivialLimitChecker that does no checks. -func newLimitChecker(conf *Config) limitChecker { - c := newCgroupsV1MemoryLimitChecker(DefaultCgroupsV1MemoryDirectory, conf.MemoryLimitPercent) +func isSupported(c limitChecker) bool { + _, err := c.isLimitExceeded() + return err == nil +} + +// newCgroupsMemoryLimitCheckerIfSupported attempts to auto-discover whether +// Cgroups V1 or V2 is supported for checking system memory limits. 
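The v1 and v2 checkers introduced below differ only in which cgroup files they read and which regexp extracts the inactive file pages; the limit test itself (isLimitExceeded, further below) is plain integer arithmetic. A toy helper (an assumption, not code from this diff) spelling out that comparison with the same numbers the updated unit test uses:

// wouldThrottle mirrors the comparison in isLimitExceeded: the container's
// working set (usage minus inactive file cache) is measured against
// memLimitPercent percent of the cgroup memory limit.
func wouldThrottle(limit, usage, inactive, memLimitPercent int) bool {
	return usage-inactive >= (limit*memLimitPercent)/100
}

// wouldThrottle(1000, 1000, 51, 95) == false  (949 < 950)
// wouldThrottle(1000, 1000, 50, 95) == true   (950 >= 950)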
+func newCgroupsMemoryLimitCheckerIfSupported(conf *Config) (*cgroupsMemoryLimitChecker, error) { + c := newCgroupsMemoryLimitChecker(cgroupsV1MemoryFiles, conf.MemLimitPercent) if isSupported(c) { log.Info("Cgroups v1 detected, enabling memory limit RPC throttling") - return c + return c, nil + } + + c = newCgroupsMemoryLimitChecker(cgroupsV2MemoryFiles, conf.MemLimitPercent) + if isSupported(c) { + log.Info("Cgroups v2 detected, enabling memory limit RPC throttling") + return c, nil } - log.Error("No method for determining memory usage and limits was discovered, disabled memory limit RPC throttling") - return &trivialLimitChecker{} + return nil, errNotSupported } // trivialLimitChecker checks no limits, so its limits are never exceeded. @@ -115,28 +130,37 @@ func (_ trivialLimitChecker) isLimitExceeded() (bool, error) { func (_ trivialLimitChecker) String() string { return "trivial" } -const DefaultCgroupsV1MemoryDirectory = "/sys/fs/cgroup/memory/" +type cgroupsMemoryFiles struct { + limitFile, usageFile, statsFile string + inactiveRe *regexp.Regexp +} -type cgroupsV1MemoryLimitChecker struct { - cgroupDir string - memoryLimitPercent int +const defaultCgroupsV1MemoryDirectory = "/sys/fs/cgroup/memory/" +const defaultCgroupsV2MemoryDirectory = "/sys/fs/cgroup/" - limitFile, usageFile, statsFile string +var cgroupsV1MemoryFiles = cgroupsMemoryFiles{ + limitFile: defaultCgroupsV1MemoryDirectory + "/memory.limit_in_bytes", + usageFile: defaultCgroupsV1MemoryDirectory + "/memory.usage_in_bytes", + statsFile: defaultCgroupsV1MemoryDirectory + "/memory.stat", + inactiveRe: regexp.MustCompile(`total_inactive_file (\d+)`), +} +var cgroupsV2MemoryFiles = cgroupsMemoryFiles{ + limitFile: defaultCgroupsV2MemoryDirectory + "/memory.max", + usageFile: defaultCgroupsV2MemoryDirectory + "/memory.current", + statsFile: defaultCgroupsV2MemoryDirectory + "/memory.stat", + inactiveRe: regexp.MustCompile(`inactive_file (\d+)`), } -func newCgroupsV1MemoryLimitChecker(cgroupDir string, memoryLimitPercent int) *cgroupsV1MemoryLimitChecker { - return &cgroupsV1MemoryLimitChecker{ - cgroupDir: cgroupDir, - memoryLimitPercent: memoryLimitPercent, - limitFile: cgroupDir + "/memory.limit_in_bytes", - usageFile: cgroupDir + "/memory.usage_in_bytes", - statsFile: cgroupDir + "/memory.stat", - } +type cgroupsMemoryLimitChecker struct { + files cgroupsMemoryFiles + memoryLimitPercent int } -func isSupported(c limitChecker) bool { - _, err := c.isLimitExceeded() - return err == nil +func newCgroupsMemoryLimitChecker(files cgroupsMemoryFiles, memoryLimitPercent int) *cgroupsMemoryLimitChecker { + return &cgroupsMemoryLimitChecker{ + files: files, + memoryLimitPercent: memoryLimitPercent, + } } // isLimitExceeded checks if the system memory used exceeds the limit @@ -145,24 +169,25 @@ func isSupported(c limitChecker) bool { // See the following page for details of calculating the memory used, // which is reported as container_memory_working_set_bytes in prometheus: // https://mihai-albert.com/2022/02/13/out-of-memory-oom-in-kubernetes-part-3-memory-metrics-sources-and-tools-to-collect-them/ -func (c *cgroupsV1MemoryLimitChecker) isLimitExceeded() (bool, error) { +func (c *cgroupsMemoryLimitChecker) isLimitExceeded() (bool, error) { var limit, usage, inactive int var err error - limit, err = readIntFromFile(c.limitFile) - if err != nil { + if limit, err = readIntFromFile(c.files.limitFile); err != nil { return false, err } - usage, err = readIntFromFile(c.usageFile) - if err != nil { + if usage, err = 
readIntFromFile(c.files.usageFile); err != nil { return false, err } - inactive, err = readInactive(c.statsFile) - if err != nil { + if inactive, err = readInactive(c.files.statsFile, c.files.inactiveRe); err != nil { return false, err } return usage-inactive >= ((limit * c.memoryLimitPercent) / 100), nil } +func (c cgroupsMemoryLimitChecker) String() string { + return "CgroupsMemoryLimitChecker" +} + func readIntFromFile(fileName string) (int, error) { file, err := os.Open(fileName) if err != nil { @@ -176,9 +201,7 @@ func readIntFromFile(fileName string) (int, error) { return limit, nil } -var re = regexp.MustCompile(`total_inactive_file (\d+)`) - -func readInactive(fileName string) (int, error) { +func readInactive(fileName string, re *regexp.Regexp) (int, error) { file, err := os.Open(fileName) if err != nil { return 0, err @@ -201,7 +224,3 @@ func readInactive(fileName string) (int, error) { return 0, errors.New("total_inactive_file not found in " + fileName) } - -func (c cgroupsV1MemoryLimitChecker) String() string { - return "CgroupsV1MemoryLimitChecker" -} diff --git a/arbnode/resourcemanager/resource_management_test.go b/arbnode/resourcemanager/resource_management_test.go index fe470e706b..ba791fd729 100644 --- a/arbnode/resourcemanager/resource_management_test.go +++ b/arbnode/resourcemanager/resource_management_test.go @@ -6,29 +6,28 @@ package resourcemanager import ( "fmt" "os" + "regexp" "testing" ) -func updateFakeCgroupv1Files(c *cgroupsV1MemoryLimitChecker, limit, usage, inactive int) error { - limitFile, err := os.Create(c.limitFile) +func updateFakeCgroupFiles(c *cgroupsMemoryLimitChecker, limit, usage, inactive int) error { + limitFile, err := os.Create(c.files.limitFile) if err != nil { return err } - _, err = fmt.Fprintf(limitFile, "%d\n", limit) - if err != nil { + if _, err = fmt.Fprintf(limitFile, "%d\n", limit); err != nil { return err } - usageFile, err := os.Create(c.usageFile) + usageFile, err := os.Create(c.files.usageFile) if err != nil { return err } - _, err = fmt.Fprintf(usageFile, "%d\n", usage) - if err != nil { + if _, err = fmt.Fprintf(usageFile, "%d\n", usage); err != nil { return err } - statsFile, err := os.Create(c.statsFile) + statsFile, err := os.Create(c.files.statsFile) if err != nil { return err } @@ -37,42 +36,57 @@ total_rss 1016209408 total_inactive_file %d total_active_file 321544192 `, inactive) - if err != nil { - return err - } - return nil + return err } -func TestCgroupsv1MemoryLimit(t *testing.T) { - cgroupDir := t.TempDir() - c := newCgroupsV1MemoryLimitChecker(cgroupDir, 95) - _, err := c.isLimitExceeded() - if err == nil { - t.Error("Should fail open if can't read files") +func makeCgroupsTestDir(cgroupDir string) cgroupsMemoryFiles { + return cgroupsMemoryFiles{ + limitFile: cgroupDir + "/memory.limit_in_bytes", + usageFile: cgroupDir + "/memory.usage_in_bytes", + statsFile: cgroupDir + "/memory.stat", + inactiveRe: regexp.MustCompile(`total_inactive_file (\d+)`), } +} - err = updateFakeCgroupv1Files(c, 1000, 1000, 51) - if err != nil { - t.Error(err) - } - exceeded, err := c.isLimitExceeded() - if err != nil { - t.Error(err) - } - if exceeded { - t.Error("Expected under limit") +func TestCgroupsFailIfCantOpen(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + c := newCgroupsMemoryLimitChecker(testFiles, 95) + if _, err := c.isLimitExceeded(); err == nil { + t.Fatal("Should fail open if can't read files") } +} - err = updateFakeCgroupv1Files(c, 1000, 1000, 50) - if err != nil { - t.Error(err) +func 
TestCgroupsMemoryLimit(t *testing.T) { + for _, tc := range []struct { + desc string + inactive int + want bool + }{ + { + desc: "limit should be exceeded", + inactive: 50, + want: true, + }, + { + desc: "limit should not be exceeded", + inactive: 51, + want: false, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + testFiles := makeCgroupsTestDir(t.TempDir()) + c := newCgroupsMemoryLimitChecker(testFiles, 95) + if err := updateFakeCgroupFiles(c, 1000, 1000, tc.inactive); err != nil { + t.Fatalf("Updating cgroup files: %v", err) + } + exceeded, err := c.isLimitExceeded() + if err != nil { + t.Fatalf("Checking if limit exceeded: %v", err) + } + if exceeded != tc.want { + t.Errorf("isLimitExceeded() = %t, want %t", exceeded, tc.want) + } + }, + ) } - exceeded, err = c.isLimitExceeded() - if err != nil { - t.Error(err) - } - if !exceeded { - t.Error("Expected over limit") - } - } diff --git a/arbnode/seq_coordinator.go b/arbnode/seq_coordinator.go index 31cab83b1f..82796f3905 100644 --- a/arbnode/seq_coordinator.go +++ b/arbnode/seq_coordinator.go @@ -58,28 +58,28 @@ type SeqCoordinator struct { } type SeqCoordinatorConfig struct { - Enable bool `koanf:"enable"` - ChosenHealthcheckAddr string `koanf:"chosen-healthcheck-addr"` - RedisUrl string `koanf:"redis-url"` - LockoutDuration time.Duration `koanf:"lockout-duration"` - LockoutSpare time.Duration `koanf:"lockout-spare"` - SeqNumDuration time.Duration `koanf:"seq-num-duration"` - UpdateInterval time.Duration `koanf:"update-interval"` - RetryInterval time.Duration `koanf:"retry-interval"` - HandoffTimeout time.Duration `koanf:"handoff-timeout"` - SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"` - ReleaseRetries int `koanf:"release-retries"` - MaxMsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` - MyUrlImpl string `koanf:"my-url"` - Signing signature.SignVerifyConfig `koanf:"signer"` -} - -func (c *SeqCoordinatorConfig) MyUrl() string { - if c.MyUrlImpl == "" { + Enable bool `koanf:"enable"` + ChosenHealthcheckAddr string `koanf:"chosen-healthcheck-addr"` + RedisUrl string `koanf:"redis-url"` + LockoutDuration time.Duration `koanf:"lockout-duration"` + LockoutSpare time.Duration `koanf:"lockout-spare"` + SeqNumDuration time.Duration `koanf:"seq-num-duration"` + UpdateInterval time.Duration `koanf:"update-interval"` + RetryInterval time.Duration `koanf:"retry-interval"` + HandoffTimeout time.Duration `koanf:"handoff-timeout"` + SafeShutdownDelay time.Duration `koanf:"safe-shutdown-delay"` + ReleaseRetries int `koanf:"release-retries"` + // Max message per poll. 
+ MsgPerPoll arbutil.MessageIndex `koanf:"msg-per-poll"` + MyUrl string `koanf:"my-url"` + Signer signature.SignVerifyConfig `koanf:"signer"` +} + +func (c *SeqCoordinatorConfig) Url() string { + if c.MyUrl == "" { return redisutil.INVALID_URL } - - return c.MyUrlImpl + return c.MyUrl } func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -94,8 +94,8 @@ func SeqCoordinatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Duration(prefix+".handoff-timeout", DefaultSeqCoordinatorConfig.HandoffTimeout, "the maximum amount of time to spend waiting for another sequencer to accept the lockout when handing it off on shutdown or db compaction") f.Duration(prefix+".safe-shutdown-delay", DefaultSeqCoordinatorConfig.SafeShutdownDelay, "if non-zero will add delay after transferring control") f.Int(prefix+".release-retries", DefaultSeqCoordinatorConfig.ReleaseRetries, "the number of times to retry releasing the wants lockout and chosen one status on shutdown") - f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MaxMsgPerPoll), "will only be marked as wanting the lockout if not too far behind") - f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrlImpl, "url for this sequencer if it is the chosen") + f.Uint64(prefix+".msg-per-poll", uint64(DefaultSeqCoordinatorConfig.MsgPerPoll), "will only be marked as wanting the lockout if not too far behind") + f.String(prefix+".my-url", DefaultSeqCoordinatorConfig.MyUrl, "url for this sequencer if it is the chosen") signature.SignVerifyConfigAddOptions(prefix+".signer", f) } @@ -111,9 +111,9 @@ var DefaultSeqCoordinatorConfig = SeqCoordinatorConfig{ SafeShutdownDelay: 5 * time.Second, ReleaseRetries: 4, RetryInterval: 50 * time.Millisecond, - MaxMsgPerPoll: 2000, - MyUrlImpl: redisutil.INVALID_URL, - Signing: signature.DefaultSignVerifyConfig, + MsgPerPoll: 2000, + MyUrl: redisutil.INVALID_URL, + Signer: signature.DefaultSignVerifyConfig, } var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ @@ -127,9 +127,9 @@ var TestSeqCoordinatorConfig = SeqCoordinatorConfig{ SafeShutdownDelay: time.Millisecond * 100, ReleaseRetries: 4, RetryInterval: time.Millisecond * 3, - MaxMsgPerPoll: 20, - MyUrlImpl: redisutil.INVALID_URL, - Signing: signature.DefaultSignVerifyConfig, + MsgPerPoll: 20, + MyUrl: redisutil.INVALID_URL, + Signer: signature.DefaultSignVerifyConfig, } func NewSeqCoordinator(dataSigner signature.DataSignerFunc, bpvalidator *contracts.BatchPosterVerifier, streamer *TransactionStreamer, sequencer *execution.Sequencer, sync *SyncMonitor, config SeqCoordinatorConfig) (*SeqCoordinator, error) { @@ -137,7 +137,7 @@ func NewSeqCoordinator(dataSigner signature.DataSignerFunc, bpvalidator *contrac if err != nil { return nil, err } - signer, err := signature.NewSignVerify(&config.Signing, dataSigner, bpvalidator) + signer, err := signature.NewSignVerify(&config.Signer, dataSigner, bpvalidator) if err != nil { return nil, err } @@ -250,7 +250,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC if err != nil { return err } - if c.config.Signing.SymmetricSign { + if c.config.Signer.SymmetricSign { messageString := string(append(msgSig, msgBytes...)) messageData = &messageString } else { @@ -278,7 +278,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC if err != nil { return err } - if !wasEmpty && (current != c.config.MyUrl()) { + if !wasEmpty && (current != c.config.Url()) { return fmt.Errorf("%w: failed to catch lock. 
redis shows chosen: %s", execution.ErrRetrySequencer, current) } remoteMsgCount, err := c.getRemoteMsgCountImpl(ctx, tx) @@ -300,7 +300,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC initialDuration = 2 * time.Second } if wasEmpty { - pipe.Set(ctx, redisutil.CHOSENSEQ_KEY, c.config.MyUrl(), initialDuration) + pipe.Set(ctx, redisutil.CHOSENSEQ_KEY, c.config.Url(), initialDuration) } pipe.Set(ctx, redisutil.MSG_COUNT_KEY, msgCountMsg, c.config.SeqNumDuration) if messageData != nil { @@ -311,7 +311,7 @@ func (c *SeqCoordinator) acquireLockoutAndWriteMessage(ctx context.Context, msgC } pipe.PExpireAt(ctx, redisutil.CHOSENSEQ_KEY, lockoutUntil) if setWantsLockout { - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) pipe.Set(ctx, myWantsLockoutKey, redisutil.WANTS_LOCKOUT_VAL, initialDuration) pipe.PExpireAt(ctx, myWantsLockoutKey, lockoutUntil) } @@ -362,7 +362,7 @@ func (c *SeqCoordinator) wantsLockoutUpdateWithMutex(ctx context.Context) error if c.avoidLockout > 0 { return nil } - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) wantsLockoutUntil := time.Now().Add(c.config.LockoutDuration) pipe := c.Client.TxPipeline() initialDuration := c.config.LockoutDuration @@ -390,7 +390,7 @@ func (c *SeqCoordinator) chosenOneRelease(ctx context.Context) error { if err != nil { return err } - if current != c.config.MyUrl() { + if current != c.config.Url() { return nil } pipe := tx.TxPipeline() @@ -409,7 +409,7 @@ func (c *SeqCoordinator) chosenOneRelease(ctx context.Context) error { if errors.Is(readErr, redis.Nil) { return nil } - if current != c.config.MyUrl() { + if current != c.config.Url() { return nil } return releaseErr @@ -421,7 +421,7 @@ func (c *SeqCoordinator) wantsLockoutRelease(ctx context.Context) error { if !c.reportedWantsLockout { return nil } - myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.MyUrl()) + myWantsLockoutKey := redisutil.WantsLockoutKeyFor(c.config.Url()) releaseErr := c.Client.Del(ctx, myWantsLockoutKey).Err() if releaseErr != nil { // got error - was it still deleted? 
@@ -450,7 +450,7 @@ func (c *SeqCoordinator) noRedisError() time.Duration { // update for the prev known-chosen sequencer (no need to load new messages) func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen string) time.Duration { - if nextChosen != "" && nextChosen != c.config.MyUrl() { + if nextChosen != "" && nextChosen != c.config.Url() { // was the active sequencer, but no longer // we maintain chosen status if we had it and nobody in the priorities wants the lockout setPrevChosenTo := nextChosen @@ -467,7 +467,7 @@ func (c *SeqCoordinator) updateWithLockout(ctx context.Context, nextChosen strin return c.retryAfterRedisError() } c.prevChosenSequencer = setPrevChosenTo - log.Info("released chosen-coordinator lock", "myUrl", c.config.MyUrl(), "nextChosen", nextChosen) + log.Info("released chosen-coordinator lock", "myUrl", c.config.Url(), "nextChosen", nextChosen) return c.noRedisError() } // Was, and still is, the active sequencer @@ -496,10 +496,10 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("coordinator failed finding sequencer wanting lockout", "err", err) return c.retryAfterRedisError() } - if c.prevChosenSequencer == c.config.MyUrl() { + if c.prevChosenSequencer == c.config.Url() { return c.updateWithLockout(ctx, chosenSeq) } - if chosenSeq != c.config.MyUrl() && chosenSeq != c.prevChosenSequencer { + if chosenSeq != c.config.Url() && chosenSeq != c.prevChosenSequencer { var err error if c.sequencer != nil { err = c.sequencer.ForwardTo(chosenSeq) @@ -526,8 +526,8 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { return c.retryAfterRedisError() } readUntil := remoteMsgCount - if readUntil > localMsgCount+c.config.MaxMsgPerPoll { - readUntil = localMsgCount + c.config.MaxMsgPerPoll + if readUntil > localMsgCount+c.config.MsgPerPoll { + readUntil = localMsgCount + c.config.MsgPerPoll } var messages []arbostypes.MessageWithMetadata msgToRead := localMsgCount @@ -599,7 +599,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } } - if c.config.MyUrl() == redisutil.INVALID_URL { + if c.config.Url() == redisutil.INVALID_URL { return c.noRedisError() } @@ -614,7 +614,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { } // can take over as main sequencer? 
- if synced && localMsgCount >= remoteMsgCount && chosenSeq == c.config.MyUrl() { + if synced && localMsgCount >= remoteMsgCount && chosenSeq == c.config.Url() { if c.sequencer == nil { log.Error("myurl main sequencer, but no sequencer exists") return c.noRedisError() @@ -639,7 +639,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { c.prevChosenSequencer = "" return c.retryAfterRedisError() } - log.Info("caught chosen-coordinator lock", "myUrl", c.config.MyUrl()) + log.Info("caught chosen-coordinator lock", "myUrl", c.config.Url()) if c.delayedSequencer != nil { err = c.delayedSequencer.ForceSequenceDelayed(ctx) if err != nil { @@ -651,7 +651,7 @@ func (c *SeqCoordinator) update(ctx context.Context) time.Duration { log.Warn("failed to populate the feed backlog on lockout acquisition", "err", err) } c.sequencer.Activate() - c.prevChosenSequencer = c.config.MyUrl() + c.prevChosenSequencer = c.config.Url() return c.noRedisError() } } @@ -684,7 +684,7 @@ func (c *SeqCoordinator) AvoidingLockout() bool { func (c *SeqCoordinator) DebugPrint() string { c.wantsLockoutMutex.Lock() defer c.wantsLockoutMutex.Unlock() - return fmt.Sprint("Url:", c.config.MyUrl(), + return fmt.Sprint("Url:", c.config.Url(), " prevChosenSequencer:", c.prevChosenSequencer, " reportedWantsLockout:", c.reportedWantsLockout, " lockoutUntil:", c.lockoutUntil, @@ -760,7 +760,7 @@ func (c *SeqCoordinator) StopAndWait() { // We've just stopped our normal context so we need to use our parent's context. parentCtx := c.StopWaiter.GetParentContext() for i := 0; i <= c.config.ReleaseRetries || c.config.ReleaseRetries < 0; i++ { - log.Info("releasing wants lockout key", "myUrl", c.config.MyUrl(), "attempt", i) + log.Info("releasing wants lockout key", "myUrl", c.config.Url(), "attempt", i) err := c.wantsLockoutRelease(parentCtx) if err == nil { c.noRedisError() @@ -771,7 +771,7 @@ func (c *SeqCoordinator) StopAndWait() { } } for i := 0; i < c.config.ReleaseRetries || c.config.ReleaseRetries < 0; i++ { - log.Info("releasing chosen one", "myUrl", c.config.MyUrl(), "attempt", i) + log.Info("releasing chosen one", "myUrl", c.config.Url(), "attempt", i) err := c.chosenOneRelease(parentCtx) if err == nil { c.noRedisError() @@ -804,7 +804,7 @@ func (c *SeqCoordinator) AvoidLockout(ctx context.Context) bool { c.wantsLockoutMutex.Lock() c.avoidLockout++ c.wantsLockoutMutex.Unlock() - log.Info("avoiding lockout", "myUrl", c.config.MyUrl()) + log.Info("avoiding lockout", "myUrl", c.config.Url()) err := c.wantsLockoutRelease(ctx) if err != nil { log.Error("failed to release wanting the lockout in redis", "err", err) @@ -818,7 +818,7 @@ func (c *SeqCoordinator) TryToHandoffChosenOne(ctx context.Context) bool { ctx, cancel := context.WithTimeout(ctx, c.config.HandoffTimeout) defer cancel() if c.CurrentlyChosen() { - log.Info("waiting for another sequencer to become chosen...", "timeout", c.config.HandoffTimeout, "myUrl", c.config.MyUrl()) + log.Info("waiting for another sequencer to become chosen...", "timeout", c.config.HandoffTimeout, "myUrl", c.config.Url()) success := c.waitFor(ctx, func() bool { return !c.CurrentlyChosen() }) @@ -842,7 +842,7 @@ func (c *SeqCoordinator) SeekLockout(ctx context.Context) { c.wantsLockoutMutex.Lock() defer c.wantsLockoutMutex.Unlock() c.avoidLockout-- - log.Info("seeking lockout", "myUrl", c.config.MyUrl()) + log.Info("seeking lockout", "myUrl", c.config.Url()) if c.sync.Synced() { // Even if this errors we still internally marked ourselves as wanting the lockout err := 
c.wantsLockoutUpdateWithMutex(ctx) diff --git a/arbnode/seq_coordinator_atomic_test.go b/arbnode/seq_coordinator_atomic_test.go index 8cc0acadae..61468a3adb 100644 --- a/arbnode/seq_coordinator_atomic_test.go +++ b/arbnode/seq_coordinator_atomic_test.go @@ -69,7 +69,7 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo timeLaunching := time.Now() // didn't sequence.. should we have succeeded? if timeLaunching.Before(holdingLockout) { - execError = fmt.Errorf("failed while holding lock %s err %w", coord.config.MyUrl(), err) + execError = fmt.Errorf("failed while holding lock %s err %w", coord.config.Url(), err) break } } @@ -79,9 +79,9 @@ func coordinatorTestThread(ctx context.Context, coord *SeqCoordinator, data *Coo continue } if data.sequencer[i] != "" { - execError = fmt.Errorf("two sequencers for same msg: submsg %d, success for %s, %s", i, data.sequencer[i], coord.config.MyUrl()) + execError = fmt.Errorf("two sequencers for same msg: submsg %d, success for %s, %s", i, data.sequencer[i], coord.config.Url()) } - data.sequencer[i] = coord.config.MyUrl() + data.sequencer[i] = coord.config.Url() } if execError != nil { data.err = execError @@ -99,16 +99,16 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { coordConfig := TestSeqCoordinatorConfig coordConfig.LockoutDuration = time.Millisecond * 100 coordConfig.LockoutSpare = time.Millisecond * 10 - coordConfig.Signing.ECDSA.AcceptSequencer = false - coordConfig.Signing.SymmetricFallback = true - coordConfig.Signing.SymmetricSign = true - coordConfig.Signing.Symmetric.Dangerous.DisableSignatureVerification = true - coordConfig.Signing.Symmetric.SigningKey = "" + coordConfig.Signer.ECDSA.AcceptSequencer = false + coordConfig.Signer.SymmetricFallback = true + coordConfig.Signer.SymmetricSign = true + coordConfig.Signer.Symmetric.Dangerous.DisableSignatureVerification = true + coordConfig.Signer.Symmetric.SigningKey = "" testData := CoordinatorTestData{ testStartRound: -1, sequencer: make([]string, messagesPerRound), } - nullSigner, err := signature.NewSignVerify(&coordConfig.Signing, nil, nil) + nullSigner, err := signature.NewSignVerify(&coordConfig.Signer, nil, nil) Require(t, err) redisUrl := redisutil.CreateTestRedis(ctx, t) @@ -121,7 +121,7 @@ func TestRedisSeqCoordinatorAtomic(t *testing.T) { for i := 0; i < NumOfThreads; i++ { config := coordConfig - config.MyUrlImpl = fmt.Sprint(i) + config.MyUrl = fmt.Sprint(i) redisCoordinator, err := redisutil.NewRedisCoordinator(config.RedisUrl) Require(t, err) coordinator := &SeqCoordinator{ diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index a8db1923a9..0f514ba9ca 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -69,7 +69,7 @@ type TransactionStreamer struct { } type TransactionStreamerConfig struct { - MaxBroadcastQueueSize int `koanf:"max-broadcaster-queue-size"` + MaxBroadcasterQueueSize int `koanf:"max-broadcaster-queue-size"` MaxReorgResequenceDepth int64 `koanf:"max-reorg-resequence-depth" reload:"hot"` ExecuteMessageLoopDelay time.Duration `koanf:"execute-message-loop-delay" reload:"hot"` } @@ -77,19 +77,19 @@ type TransactionStreamerConfig struct { type TransactionStreamerConfigFetcher func() *TransactionStreamerConfig var DefaultTransactionStreamerConfig = TransactionStreamerConfig{ - MaxBroadcastQueueSize: 1024, + MaxBroadcasterQueueSize: 1024, MaxReorgResequenceDepth: 1024, ExecuteMessageLoopDelay: time.Millisecond * 100, } var TestTransactionStreamerConfig = 
TransactionStreamerConfig{ - MaxBroadcastQueueSize: 10_000, + MaxBroadcasterQueueSize: 10_000, MaxReorgResequenceDepth: 128 * 1024, ExecuteMessageLoopDelay: time.Millisecond, } func TransactionStreamerConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Int(prefix+".max-broadcaster-queue-size", DefaultTransactionStreamerConfig.MaxBroadcastQueueSize, "maximum cache of pending broadcaster messages") + f.Int(prefix+".max-broadcaster-queue-size", DefaultTransactionStreamerConfig.MaxBroadcasterQueueSize, "maximum cache of pending broadcaster messages") f.Int64(prefix+".max-reorg-resequence-depth", DefaultTransactionStreamerConfig.MaxReorgResequenceDepth, "maximum number of messages to attempt to resequence on reorg (0 = never resequence, -1 = always resequence)") f.Duration(prefix+".execute-message-loop-delay", DefaultTransactionStreamerConfig.ExecuteMessageLoopDelay, "delay when polling calls to execute messages") } @@ -479,7 +479,7 @@ func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*broadcaster.B s.broadcasterQueuedMessagesActiveReorg = feedReorg } else if broadcasterQueuedMessagesPos+arbutil.MessageIndex(len(s.broadcasterQueuedMessages)) == broadcastStartPos { // Feed messages can be added directly to end of cache - maxQueueSize := s.config().MaxBroadcastQueueSize + maxQueueSize := s.config().MaxBroadcasterQueueSize if maxQueueSize == 0 || len(s.broadcasterQueuedMessages) <= maxQueueSize { s.broadcasterQueuedMessages = append(s.broadcasterQueuedMessages, messages...) } diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index e98ab08485..9f24d96765 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -189,7 +189,8 @@ func initializeRetryables(statedb *state.StateDB, rs *retryables.RetryableState, for _, r := range retryablesList { var to *common.Address if r.To != (common.Address{}) { - to = &r.To + addr := r.To + to = &addr } statedb.AddBalance(retryables.RetryableEscrowAddress(r.Id), r.Callvalue) _, err := rs.CreateRetryable(r.Id, r.Timeout, r.From, to, r.Callvalue, r.Beneficiary, r.Calldata) diff --git a/arbos/arbostypes/incomingmessage.go b/arbos/arbostypes/incomingmessage.go index e9a5466d46..04ce8ebe2e 100644 --- a/arbos/arbostypes/incomingmessage.go +++ b/arbos/arbostypes/incomingmessage.go @@ -127,14 +127,21 @@ func (msg *L1IncomingMessage) Equals(other *L1IncomingMessage) bool { return msg.Header.Equals(other.Header) && bytes.Equal(msg.L2msg, other.L2msg) } +func hashesEqual(ha, hb *common.Hash) bool { + if (ha == nil) != (hb == nil) { + return false + } + return (ha == nil && hb == nil) || *ha == *hb +} + func (h *L1IncomingMessageHeader) Equals(other *L1IncomingMessageHeader) bool { // These are all non-pointer types so it's safe to use the == operator return h.Kind == other.Kind && h.Poster == other.Poster && h.BlockNumber == other.BlockNumber && h.Timestamp == other.Timestamp && - h.RequestId == other.RequestId && - h.L1BaseFee == other.L1BaseFee + hashesEqual(h.RequestId, other.RequestId) && + arbmath.BigEquals(h.L1BaseFee, other.L1BaseFee) } func ComputeBatchGasCost(data []byte) uint64 { diff --git a/arbos/block_processor.go b/arbos/block_processor.go index fbb4b58c0a..87ecac9e77 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -192,7 +192,7 @@ func ProduceBlockAdvanced( } header := createNewHeader(lastBlockHeader, l1Info, state, chainConfig) - signer := types.MakeSigner(chainConfig, header.Number) + signer := types.MakeSigner(chainConfig, header.Number, header.Time) // Note: 
blockGasLeft will diverge from the actual gas left during execution in the event of invalid txs, // but it's only used as block-local representation limiting the amount of work done in a block. blockGasLeft, _ := state.L2PricingState().PerBlockGasLimit() diff --git a/arbos/engine.go b/arbos/engine.go index ebc27c0886..0014e8ab96 100644 --- a/arbos/engine.go +++ b/arbos/engine.go @@ -23,15 +23,15 @@ func (e Engine) Author(header *types.Header) (common.Address, error) { return header.Coinbase, nil } -func (e Engine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { +func (e Engine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { // TODO what verification should be done here? return nil } -func (e Engine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { +func (e Engine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { errors := make(chan error, len(headers)) for i := range headers { - errors <- e.VerifyHeader(chain, headers[i], seals[i]) + errors <- e.VerifyHeader(chain, headers[i]) } return make(chan struct{}), errors } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index 09a4692eae..0d44ac548e 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -677,7 +677,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { if version != 9 { gasPrice = p.evm.Context.BaseFee if p.msg.TxRunMode != core.MessageCommitMode && p.msg.GasFeeCap.Sign() == 0 { - gasPrice.SetInt64(0) // gasprice zero behavior + gasPrice = common.Big0 } } return gasPrice diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index ec6bb5a380..12d494a230 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -12,8 +12,8 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" ) type L1Interface interface { @@ -88,7 +88,7 @@ func DetailTxError(ctx context.Context, client L1Interface, tx *types.Transactio } _, err = SendTxAsCall(ctx, client, tx, from, txRes.BlockNumber, true) if err == nil { - return fmt.Errorf("%w for tx hash %v", core.ErrGasLimitReached, tx.Hash()) + return fmt.Errorf("%w for tx hash %v", vm.ErrOutOfGas, tx.Hash()) } return fmt.Errorf("SendTxAsCall got: %w for tx hash %v", err, tx.Hash()) } diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index f78ef2aa9f..c4277c2ef6 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -68,13 +68,13 @@ type Config struct { RequireChainId bool `koanf:"require-chain-id" reload:"hot"` RequireFeedVersion bool `koanf:"require-feed-version" reload:"hot"` Timeout time.Duration `koanf:"timeout" reload:"hot"` - URLs []string `koanf:"url"` - Verifier signature.VerifierConfig `koanf:"verify"` + URL []string `koanf:"url"` + Verify signature.VerifierConfig `koanf:"verify"` EnableCompression bool `koanf:"enable-compression" reload:"hot"` } func (c *Config) Enable() bool { - return len(c.URLs) > 0 && c.URLs[0] != "" + return len(c.URL) > 0 && c.URL[0] != "" } type ConfigFetcher func() *Config @@ -85,7 +85,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".require-chain-id", DefaultConfig.RequireChainId, "require chain id to be present on connect") 
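Note on the broadcastclient Config renames just above (URLs → URL, Verifier → Verify): each Go field now matches its koanf tag, so the CLI flag, the config-file key, and the struct field all use the same spelling. A minimal sketch of that correspondence, using the same pflag pattern as ConfigAddOptions; the "feed.input" prefix and the feed URL are made up for illustration and not taken from this diff:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

// Illustrative only: the field name mirrors its koanf tag, so "url" is the
// key everywhere (flag, config file, struct field).
type Config struct {
	URL []string `koanf:"url"`
}

func main() {
	defaults := Config{URL: []string{""}}
	f := flag.NewFlagSet("example", flag.ContinueOnError)
	// Same registration pattern as ConfigAddOptions, with a hypothetical prefix.
	f.StringSlice("feed.input.url", defaults.URL, "URL of sequencer feed source")
	if err := f.Parse([]string{"--feed.input.url", "wss://example.org/feed"}); err != nil {
		panic(err)
	}
	urls, _ := f.GetStringSlice("feed.input.url")
	fmt.Println(urls) // [wss://example.org/feed]
}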
f.Bool(prefix+".require-feed-version", DefaultConfig.RequireFeedVersion, "require feed version to be present on connect") f.Duration(prefix+".timeout", DefaultConfig.Timeout, "duration to wait before timing out connection to sequencer feed") - f.StringSlice(prefix+".url", DefaultConfig.URLs, "URL of sequencer feed source") + f.StringSlice(prefix+".url", DefaultConfig.URL, "URL of sequencer feed source") signature.FeedVerifierConfigAddOptions(prefix+".verify", f) f.Bool(prefix+".enable-compression", DefaultConfig.EnableCompression, "enable per message deflate compression support") } @@ -95,8 +95,8 @@ var DefaultConfig = Config{ ReconnectMaximumBackoff: time.Second * 64, RequireChainId: false, RequireFeedVersion: false, - Verifier: signature.DefultFeedVerifierConfig, - URLs: []string{""}, + Verify: signature.DefultFeedVerifierConfig, + URL: []string{""}, Timeout: 20 * time.Second, EnableCompression: true, } @@ -106,8 +106,8 @@ var DefaultTestConfig = Config{ ReconnectMaximumBackoff: 0, RequireChainId: false, RequireFeedVersion: false, - Verifier: signature.DefultFeedVerifierConfig, - URLs: []string{""}, + Verify: signature.DefultFeedVerifierConfig, + URL: []string{""}, Timeout: 200 * time.Millisecond, EnableCompression: true, } @@ -156,7 +156,7 @@ func NewBroadcastClient( bpVerifier contracts.BatchPosterVerifierInterface, adjustCount func(int32), ) (*BroadcastClient, error) { - sigVerifier, err := signature.NewVerifier(&config().Verifier, bpVerifier) + sigVerifier, err := signature.NewVerifier(&config().Verify, bpVerifier) if err != nil { return nil, err } @@ -480,7 +480,7 @@ func (bc *BroadcastClient) StopAndWait() { } func (bc *BroadcastClient) isValidSignature(ctx context.Context, message *broadcaster.BroadcastFeedMessage) error { - if bc.config().Verifier.Dangerous.AcceptMissing && bc.sigVerifier == nil { + if bc.config().Verify.Dangerous.AcceptMissing && bc.sigVerifier == nil { // Verifier disabled return nil } diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 871d9d8d8a..5912749bf0 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -202,10 +202,10 @@ func newTestBroadcastClient(config Config, listenerAddress net.Addr, chainId uin port := listenerAddress.(*net.TCPAddr).Port var bpv contracts.BatchPosterVerifierInterface if validAddr != nil { - config.Verifier.AcceptSequencer = true + config.Verify.AcceptSequencer = true bpv = contracts.NewMockBatchPosterVerifier(*validAddr) } else { - config.Verifier.AcceptSequencer = false + config.Verify.AcceptSequencer = false } return NewBroadcastClient(func() *Config { return &config }, fmt.Sprintf("ws://127.0.0.1:%d/", port), chainId, currentMessageCount, txStreamer, confirmedSequenceNumberListener, feedErrChan, bpv, func(_ int32) {}) } diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index baf7cf6394..9fc2e6348c 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -31,7 +31,7 @@ func NewBroadcastClients( bpVerifier contracts.BatchPosterVerifierInterface, ) (*BroadcastClients, error) { config := configFetcher() - urlCount := len(config.URLs) + urlCount := len(config.URL) if urlCount <= 0 { return nil, nil } @@ -39,7 +39,7 @@ func NewBroadcastClients( clients := BroadcastClients{} clients.clients = make([]*broadcastclient.BroadcastClient, 0, urlCount) var lastClientErr error - for _, address := range config.URLs { + for _, address := range config.URL { client, err := 
broadcastclient.NewBroadcastClient( configFetcher, address, diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index bde80c93d1..c3f4c62ce0 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -61,7 +61,7 @@ type ConfirmedSequenceNumberMessage struct { } func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId uint64, feedErrChan chan error, dataSigner signature.DataSignerFunc) *Broadcaster { - catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }) + catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }, func() int { return config().MaxCatchup }) return &Broadcaster{ server: wsbroadcastserver.NewWSBroadcastServer(config, catchupBuffer, chainId, feedErrChan), catchupBuffer: catchupBuffer, diff --git a/broadcaster/sequencenumbercatchupbuffer.go b/broadcaster/sequencenumbercatchupbuffer.go index 7664f1b8da..bdd3e60c5b 100644 --- a/broadcaster/sequencenumbercatchupbuffer.go +++ b/broadcaster/sequencenumbercatchupbuffer.go @@ -29,11 +29,13 @@ type SequenceNumberCatchupBuffer struct { messages []*BroadcastFeedMessage messageCount int32 limitCatchup func() bool + maxCatchup func() int } -func NewSequenceNumberCatchupBuffer(limitCatchup func() bool) *SequenceNumberCatchupBuffer { +func NewSequenceNumberCatchupBuffer(limitCatchup func() bool, maxCatchup func() int) *SequenceNumberCatchupBuffer { return &SequenceNumberCatchupBuffer{ limitCatchup: limitCatchup, + maxCatchup: maxCatchup, } } @@ -98,6 +100,15 @@ func (b *SequenceNumberCatchupBuffer) OnRegisterClient(clientConnection *wsbroad return nil, bmCount, time.Since(start) } +// Takes as input an index into the messages array, not a message index +func (b *SequenceNumberCatchupBuffer) pruneBufferToIndex(idx int) { + b.messages = b.messages[idx:] + if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { + // Too much spare capacity, copy to fresh slice to reset memory usage + b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) + } +} + func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber arbutil.MessageIndex) { if len(b.messages) == 0 { return @@ -126,11 +137,7 @@ func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber ar return } - b.messages = b.messages[confirmedIndex+1:] - if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { - // Too much spare capacity, copy to fresh slice to reset memory usage - b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) 
- } + b.pruneBufferToIndex(int(confirmedIndex) + 1) } func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { @@ -147,6 +154,12 @@ func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { confirmedSequenceNumberGauge.Update(int64(confirmMsg.SequenceNumber)) } + maxCatchup := b.maxCatchup() + if maxCatchup == 0 { + b.messages = nil + return nil + } + for _, newMsg := range broadcastMessage.Messages { if len(b.messages) == 0 { // Add to empty list @@ -167,6 +180,10 @@ func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { } } + if maxCatchup >= 0 && len(b.messages) > maxCatchup { + b.pruneBufferToIndex(len(b.messages) - maxCatchup) + } + return nil } diff --git a/broadcaster/sequencenumbercatchupbuffer_test.go b/broadcaster/sequencenumbercatchupbuffer_test.go index 40fae9875f..fc6655057e 100644 --- a/broadcaster/sequencenumbercatchupbuffer_test.go +++ b/broadcaster/sequencenumbercatchupbuffer_test.go @@ -22,6 +22,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestGetEmptyCacheMessages(t *testing.T) { @@ -29,6 +30,7 @@ func TestGetEmptyCacheMessages(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Get everything @@ -60,6 +62,7 @@ func TestGetCacheMessages(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Get everything @@ -110,6 +113,7 @@ func TestDeleteConfirmedNil(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } buffer.deleteConfirmed(0) @@ -124,6 +128,7 @@ func TestDeleteConfirmInvalidOrder(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm before cache @@ -139,6 +144,7 @@ func TestDeleteConfirmed(t *testing.T) { messages: createDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm older than cache @@ -154,6 +160,7 @@ func TestDeleteFreeMem(t *testing.T) { messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } // Confirm older than cache @@ -169,6 +176,7 @@ func TestBroadcastBadMessage(t *testing.T) { messages: nil, messageCount: 0, limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } var foo int @@ -187,6 +195,7 @@ func TestBroadcastPastSeqNum(t *testing.T) { messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } bm := BroadcastMessage{ @@ -208,6 +217,8 @@ func TestBroadcastFutureSeqNum(t *testing.T) { buffer := SequenceNumberCatchupBuffer{ messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), + limitCatchup: func() bool { return false }, + maxCatchup: func() int { return -1 }, } bm := BroadcastMessage{ @@ -223,3 +234,38 @@ func TestBroadcastFutureSeqNum(t *testing.T) { } } + +func 
TestMaxCatchupBufferSize(t *testing.T) { + limit := 5 + buffer := SequenceNumberCatchupBuffer{ + messages: nil, + messageCount: 0, + limitCatchup: func() bool { return false }, + maxCatchup: func() int { return limit }, + } + + firstMessage := 10 + for i := firstMessage; i <= 20; i += 2 { + bm := BroadcastMessage{ + Messages: []*BroadcastFeedMessage{ + { + SequenceNumber: arbutil.MessageIndex(i), + }, + { + SequenceNumber: arbutil.MessageIndex(i + 1), + }, + }, + } + err := buffer.OnDoBroadcast(bm) + Require(t, err) + haveMessages := buffer.getCacheMessages(0) + expectedCount := arbmath.MinInt(i+len(bm.Messages)-firstMessage, limit) + if len(haveMessages.Messages) != expectedCount { + t.Errorf("after broadcasting messages %v and %v, expected to have %v messages but got %v", i, i+1, expectedCount, len(haveMessages.Messages)) + } + expectedFirstMessage := arbutil.MessageIndex(arbmath.MaxInt(firstMessage, i+len(bm.Messages)-limit)) + if haveMessages.Messages[0].SequenceNumber != expectedFirstMessage { + t.Errorf("after broadcasting messages %v and %v, expected the first message to be %v but got %v", i, i+1, expectedFirstMessage, haveMessages.Messages[0].SequenceNumber) + } + } +} diff --git a/cmd/chaininfo/arbitrum_chain_info.json b/cmd/chaininfo/arbitrum_chain_info.json index 5352f9760f..e66774d426 100644 --- a/cmd/chaininfo/arbitrum_chain_info.json +++ b/cmd/chaininfo/arbitrum_chain_info.json @@ -2,6 +2,7 @@ { "chain-name": "arb1", "parent-chain-id": 1, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://arb1-sequencer.arbitrum.io/rpc", "feed-url": "wss://arb1.arbitrum.io/feed", "has-genesis-state": true, @@ -51,6 +52,7 @@ { "chain-name": "nova", "parent-chain-id": 1, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://nova.arbitrum.io/rpc", "feed-url": "wss://nova.arbitrum.io/feed", "das-index-url": "https://nova.arbitrum.io/das-servers", @@ -100,6 +102,7 @@ { "chain-name": "goerli-rollup", "parent-chain-id": 5, + "parent-chain-is-arbitrum": false, "sequencer-url": "https://goerli-rollup.arbitrum.io/rpc", "feed-url": "wss://goerli-rollup.arbitrum.io/feed", "chain-config": @@ -214,5 +217,55 @@ "GenesisBlockNum": 0 } } + }, + { + "chain-id": 421614, + "parent-chain-id": 11155111, + "parent-chain-is-arbitrum": false, + "chain-name": "sepolia-rollup", + "sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc", + "feed-url": "wss://sepolia-rollup.arbitrum.io/feed", + "chain-config": + { + "chainId": 421614, + "homesteadBlock": 0, + "daoForkBlock": null, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "clique": + { + "period": 0, + "epoch": 0 + }, + "arbitrum": + { + "EnableArbOS": true, + "AllowDebugPrecompiles": false, + "DataAvailabilityCommittee": false, + "InitialArbOSVersion": 10, + "InitialChainOwner": "0x71B61c2E250AFa05dFc36304D6c91501bE0965D8", + "GenesisBlockNum": 0 + } + }, + "rollup": + { + "bridge": "0x38f918D0E9F1b721EDaA41302E399fa1B79333a9", + "inbox": "0xaAe29B0366299461418F5324a79Afc425BE5ae21", + "sequencer-inbox": "0x6c97864CE4bEf387dE0b3310A44230f7E3F1be0D", + "rollup": "0xd80810638dbDF9081b72C1B33c65375e807281C8", + "validator-utils": "0x1f6860C3cac255fFFa72B7410b1183c3a0D261e0", + "validator-wallet-creator": "0x894fC71fA0A666352824EC954B401573C861D664", + 
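Note on the catchup-buffer changes above: the new MaxCatchup limit works alongside LimitCatchup. A value of 0 empties the buffer entirely, a negative value means unlimited, and any positive value keeps only the newest maxCatchup messages via pruneBufferToIndex, which is what TestMaxCatchupBufferSize's expectedCount/expectedFirstMessage arithmetic checks. A minimal sketch of that pruning rule, using plain ints in place of *BroadcastFeedMessage:

package main

import "fmt"

// pruneToMax mirrors the maxCatchup handling in OnDoBroadcast (sketch only;
// the real buffer also resets slice capacity when it greatly exceeds the length).
func pruneToMax(msgs []int, maxCatchup int) []int {
	if maxCatchup == 0 {
		return nil // a zero limit disables the catchup buffer entirely
	}
	if maxCatchup > 0 && len(msgs) > maxCatchup {
		msgs = msgs[len(msgs)-maxCatchup:] // keep only the newest maxCatchup entries
	}
	return msgs // a negative limit means unlimited
}

func main() {
	buf := []int{10, 11, 12, 13, 14, 15, 16}
	fmt.Println(pruneToMax(buf, 5))  // [12 13 14 15 16]
	fmt.Println(pruneToMax(buf, -1)) // [10 11 12 13 14 15 16]
	fmt.Println(pruneToMax(buf, 0))  // []
}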
"deployed-at": 4139226 + } } -] \ No newline at end of file +] diff --git a/cmd/chaininfo/chain_info.go b/cmd/chaininfo/chain_info.go index c9ffca9830..f75779b4aa 100644 --- a/cmd/chaininfo/chain_info.go +++ b/cmd/chaininfo/chain_info.go @@ -18,8 +18,9 @@ import ( var DefaultChainInfo []byte type ChainInfo struct { - ChainName string `json:"chain-name"` - ParentChainId uint64 `json:"parent-chain-id"` + ChainName string `json:"chain-name"` + ParentChainId uint64 `json:"parent-chain-id"` + ParentChainIsArbitrum *bool `json:"parent-chain-is-arbitrum"` // This is the forwarding target to submit transactions to, called the sequencer URL for clarity SequencerUrl string `json:"sequencer-url"` FeedUrl string `json:"feed-url"` diff --git a/cmd/conf/chain.go b/cmd/conf/chain.go index 54b6176f96..505957f45e 100644 --- a/cmd/conf/chain.go +++ b/cmd/conf/chain.go @@ -12,7 +12,7 @@ import ( ) type L1Config struct { - ChainID uint64 `koanf:"id"` + ID uint64 `koanf:"id"` Connection rpcclient.ClientConfig `koanf:"connection" reload:"hot"` Wallet genericconf.WalletConfig `koanf:"wallet"` } @@ -25,21 +25,21 @@ var L1ConnectionConfigDefault = rpcclient.ClientConfig{ } var L1ConfigDefault = L1Config{ - ChainID: 0, + ID: 0, Connection: L1ConnectionConfigDefault, Wallet: DefaultL1WalletConfig, } var DefaultL1WalletConfig = genericconf.WalletConfig{ Pathname: "wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, } func L1ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L1ConfigDefault.ChainID, "if set other than 0, will be used to validate database and L1 connection") + f.Uint64(prefix+".id", L1ConfigDefault.ID, "if set other than 0, will be used to validate database and L1 connection") rpcclient.RPCClientAddOptions(prefix+".connection", f, &L1ConfigDefault.Connection) genericconf.WalletConfigAddOptions(prefix+".wallet", f, L1ConfigDefault.Wallet.Pathname) } @@ -53,35 +53,35 @@ func (c *L1Config) Validate() error { } type L2Config struct { - ChainID uint64 `koanf:"id"` - ChainName string `koanf:"name"` - ChainInfoFiles []string `koanf:"info-files"` - ChainInfoJson string `koanf:"info-json"` - DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` - ChainInfoIpfsUrl string `koanf:"info-ipfs-url"` - ChainInfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` + ID uint64 `koanf:"id"` + Name string `koanf:"name"` + InfoFiles []string `koanf:"info-files"` + InfoJson string `koanf:"info-json"` + DevWallet genericconf.WalletConfig `koanf:"dev-wallet"` + InfoIpfsUrl string `koanf:"info-ipfs-url"` + InfoIpfsDownloadPath string `koanf:"info-ipfs-download-path"` } var L2ConfigDefault = L2Config{ - ChainID: 0, - ChainName: "", - ChainInfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go - ChainInfoJson: "", - DevWallet: genericconf.WalletConfigDefault, - ChainInfoIpfsUrl: "", - ChainInfoIpfsDownloadPath: "/tmp/", + ID: 0, + Name: "", + InfoFiles: []string{}, // Default file used is chaininfo/arbitrum_chain_info.json, stored in DefaultChainInfo in chain_info.go + InfoJson: "", + DevWallet: genericconf.WalletConfigDefault, + InfoIpfsUrl: "", + InfoIpfsDownloadPath: "/tmp/", } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L2ConfigDefault.ChainID, "L2 
chain ID (determines Arbitrum network)") - f.String(prefix+".name", L2ConfigDefault.ChainName, "L2 chain name (determines Arbitrum network)") - f.StringSlice(prefix+".info-files", L2ConfigDefault.ChainInfoFiles, "L2 chain info json files") - f.String(prefix+".info-json", L2ConfigDefault.ChainInfoJson, "L2 chain info in json string format") + f.Uint64(prefix+".id", L2ConfigDefault.ID, "L2 chain ID (determines Arbitrum network)") + f.String(prefix+".name", L2ConfigDefault.Name, "L2 chain name (determines Arbitrum network)") + f.StringSlice(prefix+".info-files", L2ConfigDefault.InfoFiles, "L2 chain info json files") + f.String(prefix+".info-json", L2ConfigDefault.InfoJson, "L2 chain info in json string format") // Dev wallet does not exist unless specified genericconf.WalletConfigAddOptions(prefix+".dev-wallet", f, "") - f.String(prefix+".info-ipfs-url", L2ConfigDefault.ChainInfoIpfsUrl, "url to download chain info file") - f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.ChainInfoIpfsDownloadPath, "path to save temp downloaded file") + f.String(prefix+".info-ipfs-url", L2ConfigDefault.InfoIpfsUrl, "url to download chain info file") + f.String(prefix+".info-ipfs-download-path", L2ConfigDefault.InfoIpfsDownloadPath, "path to save temp downloaded file") } diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index ba0451b0e0..335aba6a1b 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -17,6 +17,7 @@ import ( flag "github.com/spf13/pflag" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" @@ -24,6 +25,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -38,10 +40,10 @@ type DAServerConfig struct { RESTPort uint64 `koanf:"rest-port"` RESTServerTimeouts genericconf.HTTPServerTimeoutConfig `koanf:"rest-server-timeouts"` - DAConf das.DataAvailabilityConfig `koanf:"data-availability"` + DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` - LogLevel int `koanf:"log-level"` + Conf genericconf.ConfConfig `koanf:"conf"` + LogLevel int `koanf:"log-level"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -58,8 +60,8 @@ var DefaultDAServerConfig = DAServerConfig{ RESTAddr: "localhost", RESTPort: 9877, RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, - DAConf: das.DefaultDataAvailabilityConfig, - ConfConfig: genericconf.ConfConfigDefault, + DataAvailability: das.DefaultDataAvailabilityConfig, + Conf: genericconf.ConfConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, PProf: false, @@ -109,7 +111,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { if err := confighelpers.EndCommonParse(k, &serverConfig); err != nil { return nil, err } - if serverConfig.ConfConfig.Dump { + if serverConfig.Conf.Dump { err = confighelpers.DumpConfig(k, map[string]interface{}{ "data-availability.key.priv-key": "", }) @@ -191,22 +193,23 @@ func startup() error { defer cancel() var l1Reader *headerreader.HeaderReader - if serverConfig.DAConf.L1NodeURL != "" && serverConfig.DAConf.L1NodeURL != "none" { - l1Client, err := 
das.GetL1Client(ctx, serverConfig.DAConf.L1ConnectionAttempts, serverConfig.DAConf.L1NodeURL) + if serverConfig.DataAvailability.ParentChainNodeURL != "" && serverConfig.DataAvailability.ParentChainNodeURL != "none" { + l1Client, err := das.GetL1Client(ctx, serverConfig.DataAvailability.ParentChainConnectionAttempts, serverConfig.DataAvailability.ParentChainNodeURL) if err != nil { return err } - l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }) // TODO: config + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err = headerreader.New(ctx, l1Client, func() *headerreader.Config { return &headerreader.DefaultConfig }, arbSys) // TODO: config if err != nil { return err } } var seqInboxAddress *common.Address - if serverConfig.DAConf.SequencerInboxAddress == "none" { + if serverConfig.DataAvailability.SequencerInboxAddress == "none" { seqInboxAddress = nil - } else if len(serverConfig.DAConf.SequencerInboxAddress) > 0 { - seqInboxAddress, err = das.OptionalAddressFromString(serverConfig.DAConf.SequencerInboxAddress) + } else if len(serverConfig.DataAvailability.SequencerInboxAddress) > 0 { + seqInboxAddress, err = das.OptionalAddressFromString(serverConfig.DataAvailability.SequencerInboxAddress) if err != nil { return err } @@ -217,7 +220,7 @@ func startup() error { return errors.New("sequencer-inbox-address must be set to a valid L1 URL and contract address, or 'none'") } - daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DAConf, l1Reader, seqInboxAddress) + daReader, daWriter, daHealthChecker, dasLifecycleManager, err := das.CreateDAComponentsForDaserver(ctx, &serverConfig.DataAvailability, l1Reader, seqInboxAddress) if err != nil { return err } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 6f975ec712..d20a5b52cd 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -84,14 +84,13 @@ func startClient(args []string) error { // datool client rpc store type ClientStoreConfig struct { - URL string `koanf:"url"` - Message string `koanf:"message"` - RandomMessageSize int `koanf:"random-message-size"` - DASRetentionPeriod time.Duration `koanf:"das-retention-period"` - SigningKey string `koanf:"signing-key"` - SigningWallet string `koanf:"signing-wallet"` - SigningWalletPassword string `koanf:"signing-wallet-password"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` + URL string `koanf:"url"` + Message string `koanf:"message"` + RandomMessageSize int `koanf:"random-message-size"` + DASRetentionPeriod time.Duration `koanf:"das-retention-period"` + SigningKey string `koanf:"signing-key"` + SigningWallet string `koanf:"signing-wallet"` + SigningWalletPassword string `koanf:"signing-wallet-password"` } func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { @@ -151,7 +150,7 @@ func startClientStore(args []string) error { } else if config.SigningWallet != "" { walletConf := &genericconf.WalletConfig{ Pathname: config.SigningWallet, - PasswordImpl: config.SigningWalletPassword, + Password: config.SigningWalletPassword, PrivateKey: "", Account: "", OnlyCreateKey: false, @@ -196,9 +195,8 @@ func startClientStore(args []string) error { // datool client rest getbyhash type RESTClientGetByHashConfig struct { - URL string `koanf:"url"` - DataHash string `koanf:"data-hash"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` + URL string `koanf:"url"` + DataHash string `koanf:"data-hash"` } func 
parseRESTClientGetByHashConfig(args []string) (*RESTClientGetByHashConfig, error) { @@ -257,10 +255,11 @@ func startRESTClientGetByHash(args []string) error { // das keygen type KeyGenConfig struct { - Dir string - ConfConfig genericconf.ConfConfig `koanf:"conf"` - ECDSAMode bool `koanf:"ecdsa"` - WalletMode bool `koanf:"wallet"` + Dir string + // ECDSA mode. + ECDSA bool `koanf:"ecdsa"` + // Wallet mode. + Wallet bool `koanf:"wallet"` } func parseKeyGenConfig(args []string) (*KeyGenConfig, error) { @@ -288,18 +287,18 @@ func startKeyGen(args []string) error { return err } - if !config.ECDSAMode { + if !config.ECDSA { _, _, err = das.GenerateAndStoreKeys(config.Dir) if err != nil { return err } return nil - } else if !config.WalletMode { + } else if !config.Wallet { return das.GenerateAndStoreECDSAKeys(config.Dir) } else { walletConf := &genericconf.WalletConfig{ Pathname: config.Dir, - PasswordImpl: genericconf.PASSWORD_NOT_SET, // This causes a prompt for the password + Password: genericconf.PASSWORD_NOT_SET, // This causes a prompt for the password PrivateKey: "", Account: "", OnlyCreateKey: true, @@ -333,7 +332,7 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { return nil, err } - if config.ConfConfig.Dump { + if config.Conf.Dump { c, err := k.Marshal(koanfjson.Parser()) if err != nil { return nil, fmt.Errorf("unable to marshal config file to JSON: %w", err) @@ -343,10 +342,10 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { os.Exit(0) } - if config.KeysetConfig.AssumedHonest == 0 { + if config.Keyset.AssumedHonest == 0 { return nil, errors.New("--keyset.assumed-honest must be set") } - if config.KeysetConfig.Backends == "" { + if config.Keyset.Backends == "" { return nil, errors.New("--keyset.backends must be set") } @@ -356,8 +355,8 @@ func parseDumpKeyset(args []string) (*DumpKeysetConfig, error) { // das keygen type DumpKeysetConfig struct { - KeysetConfig das.AggregatorConfig `koanf:"keyset"` - ConfConfig genericconf.ConfConfig `koanf:"conf"` + Keyset das.AggregatorConfig `koanf:"keyset"` + Conf genericconf.ConfConfig `koanf:"conf"` } func dumpKeyset(args []string) error { @@ -366,12 +365,12 @@ func dumpKeyset(args []string) error { return err } - services, err := das.ParseServices(config.KeysetConfig) + services, err := das.ParseServices(config.Keyset) if err != nil { return err } - keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.KeysetConfig.AssumedHonest)) + keysetHash, keysetBytes, err := das.KeysetHashFromServices(services, uint64(config.Keyset.AssumedHonest)) if err != nil { return err } diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 43906eb98e..d687821e8b 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -14,10 +14,12 @@ import ( "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/validator/server_common" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -64,10 +66,10 @@ func main() { } wallet := genericconf.WalletConfig{ - Pathname: *l1keystore, - Account: *deployAccount, - PasswordImpl: *l1passphrase, - PrivateKey: *l1privatekey, + Pathname: *l1keystore, + Account: *deployAccount, + Password: *l1passphrase, + PrivateKey: 
*l1privatekey, } l1TransactionOpts, _, err := util.OpenWallet("l1", &wallet, l1ChainId) if err != nil { @@ -127,13 +129,20 @@ func main() { panic(fmt.Errorf("failed to deserialize chain config: %w", err)) } + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerReaderConfig }, arbSys) + if err != nil { + panic(fmt.Errorf("failed to create header reader: %w", err)) + } + l1Reader.Start(ctx) + defer l1Reader.StopAndWait() + deployedAddresses, err := arbnode.DeployOnL1( ctx, - l1client, + l1Reader, l1TransactionOpts, sequencerAddress, *authorizevalidators, - func() *headerreader.Config { return &headerReaderConfig }, arbnode.GenerateRollupConfig(*prod, moduleRoot, ownerAddress, &chainConfig, chainConfigJson, loserEscrowAddress), ) if err != nil { @@ -148,12 +157,14 @@ func main() { if err := os.WriteFile(*outfile, deployData, 0600); err != nil { panic(err) } + parentChainIsArbitrum := l1Reader.IsParentChainArbitrum() chainsInfo := []chaininfo.ChainInfo{ { - ChainName: *l2ChainName, - ParentChainId: l1ChainId.Uint64(), - ChainConfig: &chainConfig, - RollupAddresses: deployedAddresses, + ChainName: *l2ChainName, + ParentChainId: l1ChainId.Uint64(), + ParentChainIsArbitrum: &parentChainIsArbitrum, + ChainConfig: &chainConfig, + RollupAddresses: deployedAddresses, }, } chainsInfoJson, err := json.Marshal(chainsInfo) diff --git a/cmd/genericconf/wallet.go b/cmd/genericconf/wallet.go index 6e6f30e0c5..e05452e3b3 100644 --- a/cmd/genericconf/wallet.go +++ b/cmd/genericconf/wallet.go @@ -14,22 +14,22 @@ const PASSWORD_NOT_SET = "PASSWORD_NOT_SET" type WalletConfig struct { Pathname string `koanf:"pathname"` - PasswordImpl string `koanf:"password"` + Password string `koanf:"password"` PrivateKey string `koanf:"private-key"` Account string `koanf:"account"` OnlyCreateKey bool `koanf:"only-create-key"` } -func (w *WalletConfig) Password() *string { - if w.PasswordImpl == PASSWORD_NOT_SET { +func (w *WalletConfig) Pwd() *string { + if w.Password == PASSWORD_NOT_SET { return nil } - return &w.PasswordImpl + return &w.Password } var WalletConfigDefault = WalletConfig{ Pathname: "", - PasswordImpl: PASSWORD_NOT_SET, + Password: PASSWORD_NOT_SET, PrivateKey: "", Account: "", OnlyCreateKey: false, @@ -37,7 +37,7 @@ var WalletConfigDefault = WalletConfig{ func WalletConfigAddOptions(prefix string, f *flag.FlagSet, defaultPathname string) { f.String(prefix+".pathname", defaultPathname, "pathname for wallet") - f.String(prefix+".password", WalletConfigDefault.PasswordImpl, "wallet passphrase") + f.String(prefix+".password", WalletConfigDefault.Password, "wallet passphrase") f.String(prefix+".private-key", WalletConfigDefault.PrivateKey, "private key for wallet") f.String(prefix+".account", WalletConfigDefault.Account, "account to use (default is first account in keystore)") f.Bool(prefix+".only-create-key", WalletConfigDefault.OnlyCreateKey, "if true, creates new key then exits") diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 12a359cfa4..cf10787d6d 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -27,7 +27,7 @@ type ValidationNodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` - AuthRPC genericconf.AuthRPCConfig `koanf:"auth"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` 
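Note on the genericconf.WalletConfig rename above: Password is now the stored field and Pwd() the accessor; a nil return still signals "no password configured" via the PASSWORD_NOT_SET sentinel, which callers use to prompt for a passphrase or fail fast. A self-contained sketch of that contract, with the type redeclared locally for illustration:

package main

import "fmt"

const PASSWORD_NOT_SET = "PASSWORD_NOT_SET"

type WalletConfig struct {
	Password string
}

// Pwd returns nil when no password was configured, mirroring the accessor above.
func (w *WalletConfig) Pwd() *string {
	if w.Password == PASSWORD_NOT_SET {
		return nil
	}
	return &w.Password
}

func main() {
	unset := WalletConfig{Password: PASSWORD_NOT_SET}
	set := WalletConfig{Password: "hunter2"} // hypothetical passphrase
	fmt.Println(unset.Pwd() == nil)          // true: caller must prompt or error out
	fmt.Println(*set.Pwd())                  // hunter2
}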
PProf bool `koanf:"pprof"` @@ -66,7 +66,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ HTTP: HTTPConfigDefault, WS: WSConfigDefault, IPC: IPCConfigDefault, - AuthRPC: genericconf.AuthRPCConfigDefault, + Auth: genericconf.AuthRPCConfigDefault, Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, PProf: false, diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 503b016025..a461a36900 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -68,7 +68,7 @@ func mainImpl() int { stackConf.DataDir = "" // ephemeral nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) - nodeConfig.AuthRPC.Apply(&stackConf) + nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) stackConf.P2P.ListenAddr = "" stackConf.P2P.NoDial = true diff --git a/cmd/nitro/config_test.go b/cmd/nitro/config_test.go index dad91ef936..4b99b798ee 100644 --- a/cmd/nitro/config_test.go +++ b/cmd/nitro/config_test.go @@ -85,7 +85,7 @@ func TestReloads(t *testing.T) { // check that non-reloadable fields fail assignment update.Metrics = !update.Metrics testUnsafe() - update.L2.ChainID++ + update.ParentChain.ID++ testUnsafe() update.Node.Sequencer.Forwarder.ConnectionTimeout++ testUnsafe() @@ -122,7 +122,7 @@ func TestLiveNodeConfig(t *testing.T) { // check that an invalid reload gets rejected update = config.ShallowClone() - update.L2.ChainID++ + update.ParentChain.ID++ if liveConfig.Set(update) == nil { Fail(t, "failed to reject invalid update") } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index a1df2cbb2f..bdba7c1210 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -51,7 +51,7 @@ type InitConfig struct { DownloadPath string `koanf:"download-path"` DownloadPoll time.Duration `koanf:"download-poll"` DevInit bool `koanf:"dev-init"` - DevInitAddr string `koanf:"dev-init-address"` + DevInitAddress string `koanf:"dev-init-address"` DevInitBlockNum uint64 `koanf:"dev-init-blocknum"` Empty bool `koanf:"empty"` AccountsPerSync uint `koanf:"accounts-per-sync"` @@ -59,7 +59,7 @@ type InitConfig struct { ThenQuit bool `koanf:"then-quit"` Prune string `koanf:"prune"` PruneBloomSize uint64 `koanf:"prune-bloom-size"` - ResetToMsg int64 `koanf:"reset-to-message"` + ResetToMessage int64 `koanf:"reset-to-message"` } var InitConfigDefault = InitConfig{ @@ -68,14 +68,14 @@ var InitConfigDefault = InitConfig{ DownloadPath: "/tmp/", DownloadPoll: time.Minute, DevInit: false, - DevInitAddr: "", + DevInitAddress: "", DevInitBlockNum: 0, ImportFile: "", AccountsPerSync: 100000, ThenQuit: false, Prune: "", PruneBloomSize: 2048, - ResetToMsg: -1, + ResetToMessage: -1, } func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { @@ -84,15 +84,15 @@ func InitConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".download-path", InitConfigDefault.DownloadPath, "path to save temp downloaded file") f.Duration(prefix+".download-poll", InitConfigDefault.DownloadPoll, "how long to wait between polling attempts") f.Bool(prefix+".dev-init", InitConfigDefault.DevInit, "init with dev data (1 account with balance) instead of file import") - f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddr, "Address of dev-account. Leave empty to use the dev-wallet.") + f.String(prefix+".dev-init-address", InitConfigDefault.DevInitAddress, "Address of dev-account. Leave empty to use the dev-wallet.") f.Uint64(prefix+".dev-init-blocknum", InitConfigDefault.DevInitBlockNum, "Number of preinit blocks. 
Must exist in ancient database.") - f.Bool(prefix+".empty", InitConfigDefault.DevInit, "init with empty state") + f.Bool(prefix+".empty", InitConfigDefault.Empty, "init with empty state") f.Bool(prefix+".then-quit", InitConfigDefault.ThenQuit, "quit after init is done") f.String(prefix+".import-file", InitConfigDefault.ImportFile, "path for json data to import") f.Uint(prefix+".accounts-per-sync", InitConfigDefault.AccountsPerSync, "during init - sync database every X accounts. Lower value for low-memory systems. 0 disables.") f.String(prefix+".prune", InitConfigDefault.Prune, "pruning for a given use: \"full\" for full nodes serving RPC requests, or \"validator\" for validators") f.Uint64(prefix+".prune-bloom-size", InitConfigDefault.PruneBloomSize, "the amount of memory in megabytes to use for the pruning bloom filter (higher values prune better)") - f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMsg, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") + f.Int64(prefix+".reset-to-message", InitConfigDefault.ResetToMessage, "forces a reset to an old message height. Also set max-reorg-resequence-depth=0 to force re-reading messages") } func downloadInit(ctx context.Context, initConfig *InitConfig) (string, error) { @@ -515,7 +515,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo NextBlockNumber: config.Init.DevInitBlockNum, Accounts: []statetransfer.AccountInitializationInfo{ { - Addr: common.HexToAddress(config.Init.DevInitAddr), + Addr: common.HexToAddress(config.Init.DevInitAddress), EthBalance: new(big.Int).Mul(big.NewInt(params.Ether), big.NewInt(1000)), Nonce: 0, }, @@ -551,15 +551,15 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo if err != nil { return chainDb, nil, err } - combinedL2ChainInfoFiles := config.L2.ChainInfoFiles - if config.L2.ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.L2.ChainInfoIpfsUrl, config.L2.ChainInfoIpfsDownloadPath) + combinedL2ChainInfoFiles := config.Chain.InfoFiles + if config.Chain.InfoIpfsUrl != "" { + l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, config.Chain.InfoIpfsUrl, config.Chain.InfoIpfsDownloadPath) if err != nil { log.Error("error getting l2 chain info file from ipfs", "err", err) } combinedL2ChainInfoFiles = append(combinedL2ChainInfoFiles, l2ChainInfoIpfsFile) } - chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.L2.ChainID), config.L2.ChainName, genesisBlockNr, combinedL2ChainInfoFiles, config.L2.ChainInfoJson) + chainConfig, err = chaininfo.GetChainConfig(new(big.Int).SetUint64(config.Chain.ID), config.Chain.Name, genesisBlockNr, combinedL2ChainInfoFiles, config.Chain.InfoJson) if err != nil { return chainDb, nil, err } @@ -584,7 +584,7 @@ func openInitializeChainDb(ctx context.Context, stack *node.Node, config *NodeCo cacheConfig.SnapshotWait = true } var parsedInitMessage *arbostypes.ParsedInitMessage - if config.Node.L1Reader.Enable { + if config.Node.ParentChainReader.Enable { delayedBridge, err := arbnode.NewDelayedBridge(l1Client, rollupAddrs.Bridge, rollupAddrs.DeployedAt) if err != nil { return chainDb, nil, fmt.Errorf("failed creating delayed bridge while attempting to get serialized chain config from init message: %w", err) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index e4ab2ee490..a7dc7f26f9 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -28,6 +28,7 @@ import ( 
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" _ "github.com/ethereum/go-ethereum/eth/tracers/js" _ "github.com/ethereum/go-ethereum/eth/tracers/native" @@ -48,6 +49,7 @@ import ( "github.com/offchainlabs/nitro/cmd/util" "github.com/offchainlabs/nitro/cmd/util/confighelpers" _ "github.com/offchainlabs/nitro/nodeInterface" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/headerreader" @@ -88,21 +90,21 @@ func addUnlockWallet(accountManager *accounts.Manager, walletConf *genericconf.W account.Address = common.HexToAddress(walletConf.Account) account, err = myKeystore.Find(account) } else { - if walletConf.Password() == nil { + if walletConf.Pwd() == nil { return common.Address{}, errors.New("l2 password not set") } if devPrivKey == nil { return common.Address{}, errors.New("l2 private key not set") } - account, err = myKeystore.ImportECDSA(devPrivKey, *walletConf.Password()) + account, err = myKeystore.ImportECDSA(devPrivKey, *walletConf.Pwd()) } if err != nil { return common.Address{}, err } - if walletConf.Password() == nil { + if walletConf.Pwd() == nil { return common.Address{}, errors.New("l2 password not set") } - err = myKeystore.Unlock(account, *walletConf.Password()) + err = myKeystore.Unlock(account, *walletConf.Pwd()) if err != nil { return common.Address{}, err } @@ -158,9 +160,10 @@ func mainImpl() int { } stackConf := node.DefaultConfig stackConf.DataDir = nodeConfig.Persistent.Chain + stackConf.DBEngine = "leveldb" nodeConfig.HTTP.Apply(&stackConf) nodeConfig.WS.Apply(&stackConf) - nodeConfig.AuthRPC.Apply(&stackConf) + nodeConfig.Auth.Apply(&stackConf) nodeConfig.IPC.Apply(&stackConf) nodeConfig.GraphQL.Apply(&stackConf) if nodeConfig.WS.ExposeAll { @@ -208,23 +211,23 @@ func mainImpl() int { log.Info("Running Arbitrum nitro node", "revision", vcsRevision, "vcs.time", vcsTime) if nodeConfig.Node.Dangerous.NoL1Listener { - nodeConfig.Node.L1Reader.Enable = false + nodeConfig.Node.ParentChainReader.Enable = false nodeConfig.Node.BatchPoster.Enable = false nodeConfig.Node.DelayedSequencer.Enable = false } else { - nodeConfig.Node.L1Reader.Enable = true + nodeConfig.Node.ParentChainReader.Enable = true } if nodeConfig.Node.Sequencer.Enable { - if nodeConfig.Node.ForwardingTarget() != "" { + if nodeConfig.Node.ForwardingTargetF() != "" { flag.Usage() log.Crit("forwarding-target cannot be set when sequencer is enabled") } - if nodeConfig.Node.L1Reader.Enable && nodeConfig.Node.InboxReader.HardReorg { + if nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { flag.Usage() log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") } - } else if nodeConfig.Node.ForwardingTargetImpl == "" { + } else if nodeConfig.Node.ForwardingTarget == "" { flag.Usage() log.Crit("forwarding-target unset, and not sequencer (can set to \"null\" to disable forwarding)") } @@ -240,17 +243,17 @@ func mainImpl() int { defaultL1WalletConfig := conf.DefaultL1WalletConfig defaultL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - nodeConfig.Node.Staker.L1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) + nodeConfig.Node.Staker.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultValidatorL1WalletConfig := 
staker.DefaultValidatorL1WalletConfig defaultValidatorL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - nodeConfig.Node.BatchPoster.L1Wallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) + nodeConfig.Node.BatchPoster.ParentChainWallet.ResolveDirectoryNames(nodeConfig.Persistent.Chain) defaultBatchPosterL1WalletConfig := arbnode.DefaultBatchPosterL1WalletConfig defaultBatchPosterL1WalletConfig.ResolveDirectoryNames(nodeConfig.Persistent.Chain) - if nodeConfig.Node.Staker.L1Wallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.L1Wallet == defaultBatchPosterL1WalletConfig { + if nodeConfig.Node.Staker.ParentChainWallet == defaultValidatorL1WalletConfig && nodeConfig.Node.BatchPoster.ParentChainWallet == defaultBatchPosterL1WalletConfig { if sequencerNeedsKey || validatorNeedsKey || l1Wallet.OnlyCreateKey { - l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + l1TransactionOpts, dataSigner, err = util.OpenWallet("l1", l1Wallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() log.Crit("error opening parent chain wallet", "path", l1Wallet.Pathname, "account", l1Wallet.Account, "err", err) @@ -265,31 +268,31 @@ func mainImpl() int { if *l1Wallet != defaultL1WalletConfig { log.Crit("--parent-chain.wallet cannot be set if either --node.staker.l1-wallet or --node.batch-poster.l1-wallet are set") } - if sequencerNeedsKey || nodeConfig.Node.BatchPoster.L1Wallet.OnlyCreateKey { - l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.L1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + if sequencerNeedsKey || nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsBatchPoster, dataSigner, err = util.OpenWallet("l1-batch-poster", &nodeConfig.Node.BatchPoster.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() - log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.L1Wallet.Pathname, "account", nodeConfig.Node.BatchPoster.L1Wallet.Account, "err", err) + log.Crit("error opening Batch poster parent chain wallet", "path", nodeConfig.Node.BatchPoster.ParentChainWallet.Pathname, "account", nodeConfig.Node.BatchPoster.ParentChainWallet.Account, "err", err) } - if nodeConfig.Node.BatchPoster.L1Wallet.OnlyCreateKey { + if nodeConfig.Node.BatchPoster.ParentChainWallet.OnlyCreateKey { return 0 } } - if validatorNeedsKey || nodeConfig.Node.Staker.L1Wallet.OnlyCreateKey { - l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.L1Wallet, new(big.Int).SetUint64(nodeConfig.L1.ChainID)) + if validatorNeedsKey || nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { + l1TransactionOptsValidator, _, err = util.OpenWallet("l1-validator", &nodeConfig.Node.Staker.ParentChainWallet, new(big.Int).SetUint64(nodeConfig.ParentChain.ID)) if err != nil { flag.Usage() - log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.L1Wallet.Pathname, "account", nodeConfig.Node.Staker.L1Wallet.Account, "err", err) + log.Crit("error opening Validator parent chain wallet", "path", nodeConfig.Node.Staker.ParentChainWallet.Pathname, "account", nodeConfig.Node.Staker.ParentChainWallet.Account, "err", err) } - if nodeConfig.Node.Staker.L1Wallet.OnlyCreateKey { + if nodeConfig.Node.Staker.ParentChainWallet.OnlyCreateKey { return 0 } } } - combinedL2ChainInfoFile := 
nodeConfig.L2.ChainInfoFiles - if nodeConfig.L2.ChainInfoIpfsUrl != "" { - l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.L2.ChainInfoIpfsUrl, nodeConfig.L2.ChainInfoIpfsDownloadPath) + combinedL2ChainInfoFile := nodeConfig.Chain.InfoFiles + if nodeConfig.Chain.InfoIpfsUrl != "" { + l2ChainInfoIpfsFile, err := util.GetL2ChainInfoIpfsFile(ctx, nodeConfig.Chain.InfoIpfsUrl, nodeConfig.Chain.InfoIpfsDownloadPath) if err != nil { log.Error("error getting chain info file from ipfs", "err", err) } @@ -297,7 +300,7 @@ func mainImpl() int { } if nodeConfig.Node.Staker.Enable { - if !nodeConfig.Node.L1Reader.Enable { + if !nodeConfig.Node.ParentChainReader.Enable { flag.Usage() log.Crit("validator must have the parent chain reader enabled") } @@ -324,8 +327,8 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client - if nodeConfig.Node.L1Reader.Enable { - confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().L1.Connection } + if nodeConfig.Node.ParentChainReader.Enable { + confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) err := rpcClient.Start(ctx) if err != nil { @@ -336,13 +339,13 @@ func mainImpl() int { if err != nil { log.Crit("couldn't read L1 chainid", "err", err) } - if l1ChainId.Uint64() != nodeConfig.L1.ChainID { - log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.L1.ChainID) + if l1ChainId.Uint64() != nodeConfig.ParentChain.ID { + log.Crit("L1 chainID doesn't fit config", "found", l1ChainId.Uint64(), "expected", nodeConfig.ParentChain.ID) } - log.Info("connected to l1 chain", "l1url", nodeConfig.L1.Connection.URL, "l1chainid", nodeConfig.L1.ChainID) + log.Info("connected to l1 chain", "l1url", nodeConfig.ParentChain.Connection.URL, "l1chainid", nodeConfig.ParentChain.ID) - rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.L2.ChainID, nodeConfig.L2.ChainName, combinedL2ChainInfoFile, nodeConfig.L2.ChainInfoJson) + rollupAddrs, err = chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses", "err", err) } @@ -353,14 +356,14 @@ func mainImpl() int { flag.Usage() log.Crit("--node.validator.only-create-wallet-contract requires --node.validator.use-smart-contract-wallet") } - l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.L1Reader }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1Client) + l1Reader, err := headerreader.New(ctx, l1Client, func() *headerreader.Config { return &liveNodeConfig.Get().Node.ParentChainReader }, arbSys) if err != nil { log.Crit("failed to get L1 headerreader", "error", err) - } // Just create validator smart wallet if needed then exit - deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.L2.ChainID, nodeConfig.L2.ChainName, combinedL2ChainInfoFile, nodeConfig.L2.ChainInfoJson) + deployInfo, err := chaininfo.GetRollupAddressesConfig(nodeConfig.Chain.ID, nodeConfig.Chain.Name, combinedL2ChainInfoFile, nodeConfig.Chain.InfoJson) if err != nil { log.Crit("error getting rollup addresses config", "err", err) } @@ -377,7 +380,7 @@ func mainImpl() int { nodeConfig.Node.TxLookupLimit = 0 } - resourcemanager.Init(&nodeConfig.Node.ResourceManagement) + resourcemanager.Init(&nodeConfig.Node.ResourceMgmt) var 
sameProcessValidationNodeEnabled bool if nodeConfig.Node.BlockValidator.Enable && (nodeConfig.Node.BlockValidator.ValidationServer.URL == "self" || nodeConfig.Node.BlockValidator.ValidationServer.URL == "self-auth") { @@ -396,7 +399,7 @@ func mainImpl() int { log.Crit("error opening L2 dev wallet", "err", err) } if devAddr != (common.Address{}) { - nodeConfig.Init.DevInitAddr = devAddr.String() + nodeConfig.Init.DevInitAddress = devAddr.String() } } @@ -412,7 +415,7 @@ func mainImpl() int { } }() - chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.L2.ChainID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) + chainDb, l2BlockChain, err := openInitializeChainDb(ctx, stack, nodeConfig, new(big.Int).SetUint64(nodeConfig.Chain.ID), execution.DefaultCacheConfigFor(stack, &nodeConfig.Node.Caching), l1Client, rollupAddrs) if l2BlockChain != nil { deferFuncs = append(deferFuncs, func() { l2BlockChain.Stop() }) } @@ -430,7 +433,7 @@ func mainImpl() int { return 1 } - if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMsg < 0 { + if nodeConfig.Init.ThenQuit && nodeConfig.Init.ResetToMessage < 0 { return 0 } @@ -525,8 +528,8 @@ func mainImpl() int { exitCode := 0 - if err == nil && nodeConfig.Init.ResetToMsg > 0 { - err = currentNode.TxStreamer.ReorgTo(arbutil.MessageIndex(nodeConfig.Init.ResetToMsg)) + if err == nil && nodeConfig.Init.ResetToMessage > 0 { + err = currentNode.TxStreamer.ReorgTo(arbutil.MessageIndex(nodeConfig.Init.ResetToMessage)) if err != nil { fatalErrChan <- fmt.Errorf("error reseting message: %w", err) exitCode = 1 @@ -557,8 +560,8 @@ type NodeConfig struct { Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` Node arbnode.Config `koanf:"node" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - L1 conf.L1Config `koanf:"parent-chain" reload:"hot"` - L2 conf.L2Config `koanf:"chain"` + ParentChain conf.L1Config `koanf:"parent-chain" reload:"hot"` + Chain conf.L2Config `koanf:"chain"` LogLevel int `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` @@ -566,7 +569,7 @@ type NodeConfig struct { HTTP genericconf.HTTPConfig `koanf:"http"` WS genericconf.WSConfig `koanf:"ws"` IPC genericconf.IPCConfig `koanf:"ipc"` - AuthRPC genericconf.AuthRPCConfig `koanf:"auth"` + Auth genericconf.AuthRPCConfig `koanf:"auth"` GraphQL genericconf.GraphQLConfig `koanf:"graphql"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -579,8 +582,8 @@ type NodeConfig struct { var NodeConfigDefault = NodeConfig{ Conf: genericconf.ConfConfigDefault, Node: arbnode.ConfigDefault, - L1: conf.L1ConfigDefault, - L2: conf.L2ConfigDefault, + ParentChain: conf.L1ConfigDefault, + Chain: conf.L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", Persistent: conf.PersistentConfigDefault, @@ -622,8 +625,8 @@ func (c *NodeConfig) ResolveDirectoryNames() error { if err != nil { return err } - c.L1.ResolveDirectoryNames(c.Persistent.Chain) - c.L2.ResolveDirectoryNames(c.Persistent.Chain) + c.ParentChain.ResolveDirectoryNames(c.Persistent.Chain) + c.Chain.ResolveDirectoryNames(c.Persistent.Chain) return nil } @@ -667,7 +670,7 @@ func (c *NodeConfig) CanReload(new *NodeConfig) error { } func (c *NodeConfig) Validate() error { - if err := c.L1.Validate(); err != nil { + if err := c.ParentChain.Validate(); err != nil { return err 
} return c.Node.Validate() @@ -741,10 +744,10 @@ func ParseNode(ctx context.Context, args []string) (*NodeConfig, *genericconf.Wa } // Don't pass around wallet contents with normal configuration - l1Wallet := nodeConfig.L1.Wallet - l2DevWallet := nodeConfig.L2.DevWallet - nodeConfig.L1.Wallet = genericconf.WalletConfigDefault - nodeConfig.L2.DevWallet = genericconf.WalletConfigDefault + l1Wallet := nodeConfig.ParentChain.Wallet + l2DevWallet := nodeConfig.Chain.DevWallet + nodeConfig.ParentChain.Wallet = genericconf.WalletConfigDefault + nodeConfig.Chain.DevWallet = genericconf.WalletConfigDefault err = nodeConfig.Validate() if err != nil { @@ -767,6 +770,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c if err != nil { return false, err } + var parentChainIsArbitrum bool + if chainInfo.ParentChainIsArbitrum != nil { + parentChainIsArbitrum = *chainInfo.ParentChainIsArbitrum + } else { + log.Warn("Chain information parentChainIsArbitrum field missing, in the future this will be required", "chainId", chainId, "parentChainId", chainInfo.ParentChainId) + _, err := chaininfo.ProcessChainInfo(chainInfo.ParentChainId, "", combinedL2ChainInfoFiles, "") + if err == nil { + parentChainIsArbitrum = true + } + } chainDefaults := map[string]interface{}{ "persistent.chain": chainInfo.ChainName, "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), @@ -786,6 +799,16 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c if !chainInfo.HasGenesisState { chainDefaults["init.empty"] = true } + if parentChainIsArbitrum { + l2MaxTxSize := execution.DefaultSequencerConfig.MaxTxDataSize + bufferSpace := 5000 + if l2MaxTxSize < bufferSpace*2 { + return false, fmt.Errorf("not enough room in parent chain max tx size %v for bufferSpace %v * 2", l2MaxTxSize, bufferSpace) + } + safeBatchSize := l2MaxTxSize - bufferSpace + chainDefaults["node.batch-poster.max-size"] = safeBatchSize + chainDefaults["node.sequencer.max-tx-data-size"] = safeBatchSize - bufferSpace + } err = k.Load(confmap.Provider(chainDefaults, "."), nil) if err != nil { return false, err diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 2bff942a44..552838308d 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -63,7 +63,7 @@ func startup() error { ctx := context.Background() relayConfig, err := relay.ParseRelay(ctx, os.Args[1:]) - if err != nil || len(relayConfig.Node.Feed.Input.URLs) == 0 || relayConfig.Node.Feed.Input.URLs[0] == "" || relayConfig.L2.ChainId == 0 { + if err != nil || len(relayConfig.Node.Feed.Input.URL) == 0 || relayConfig.Node.Feed.Input.URL[0] == "" || relayConfig.Chain.ID == 0 { confighelpers.PrintErrorAndExit(err, printSampleUsage) } diff --git a/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go new file mode 100644 index 0000000000..782ab3801b --- /dev/null +++ b/cmd/seq-coordinator-manager/rediscoordinator/redis_coordinator.go @@ -0,0 +1,28 @@ +package rediscoordinator + +import ( + "context" + "errors" + "strings" + + "github.com/go-redis/redis/v8" + "github.com/offchainlabs/nitro/util/redisutil" +) + +// RedisCoordinator builds upon RedisCoordinator of redisutil with additional functionality +type RedisCoordinator struct { + *redisutil.RedisCoordinator +} + +// UpdatePriorities updates the priority list of sequencers +func (rc *RedisCoordinator) UpdatePriorities(ctx context.Context, priorities []string) error { + prioritiesString := strings.Join(priorities, ",") + 
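Note on the applyChainParameters change above: when the parent chain is itself an Arbitrum chain, batch defaults shrink to leave room inside the parent chain's own transaction size limit; node.batch-poster.max-size becomes the sequencer's MaxTxDataSize minus a 5000-byte buffer, and node.sequencer.max-tx-data-size subtracts that buffer once more. A worked sketch of the arithmetic, assuming an illustrative 95000-byte MaxTxDataSize rather than a value taken from this diff:

package main

import "fmt"

func main() {
	// Assumed example value standing in for execution.DefaultSequencerConfig.MaxTxDataSize.
	l2MaxTxSize := 95000
	bufferSpace := 5000

	if l2MaxTxSize < bufferSpace*2 {
		panic("not enough room in parent chain max tx size")
	}
	safeBatchSize := l2MaxTxSize - bufferSpace
	fmt.Println("node.batch-poster.max-size      =", safeBatchSize)             // 90000
	fmt.Println("node.sequencer.max-tx-data-size =", safeBatchSize-bufferSpace) // 85000
}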
err := rc.Client.Set(ctx, redisutil.PRIORITIES_KEY, prioritiesString, 0).Err() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + return err + } + return nil +} diff --git a/cmd/seq-coordinator-manager/seq-coordinator-manager.go b/cmd/seq-coordinator-manager/seq-coordinator-manager.go new file mode 100644 index 0000000000..a0123a9123 --- /dev/null +++ b/cmd/seq-coordinator-manager/seq-coordinator-manager.go @@ -0,0 +1,318 @@ +package main + +import ( + "context" + "fmt" + "os" + "strconv" + + "github.com/enescakir/emoji" + "github.com/ethereum/go-ethereum/log" + "github.com/gdamore/tcell/v2" + "github.com/offchainlabs/nitro/cmd/seq-coordinator-manager/rediscoordinator" + "github.com/offchainlabs/nitro/util/redisutil" + "github.com/rivo/tview" +) + +// Tview +var pages = tview.NewPages() +var app = tview.NewApplication() + +// Lists +var prioritySeqList = tview.NewList().ShowSecondaryText(false) +var nonPrioritySeqList = tview.NewList().ShowSecondaryText(false) + +// Forms +var addSeqForm = tview.NewForm() +var priorityForm = tview.NewForm() +var nonPriorityForm = tview.NewForm() + +// Sequencer coordinator management UI data store +type manager struct { + redisCoordinator *rediscoordinator.RedisCoordinator + prioritiesSet map[string]bool + livelinessSet map[string]bool + priorityList []string + nonPriorityList []string +} + +func main() { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + args := os.Args[1:] + if len(args) != 1 { + fmt.Fprintf(os.Stderr, "Usage: seq-coordinator-manager [redis-url]\n") + os.Exit(1) + } + redisURL := args[0] + redisutilCoordinator, err := redisutil.NewRedisCoordinator(redisURL) + if err != nil { + panic(err) + } + + seqManager := &manager{ + redisCoordinator: &rediscoordinator.RedisCoordinator{ + RedisCoordinator: redisutilCoordinator, + }, + prioritiesSet: make(map[string]bool), + livelinessSet: make(map[string]bool), + } + + seqManager.refreshAllLists(ctx) + seqManager.populateLists(ctx) + + prioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { + nonPriorityForm.Clear(true) + + n := len(seqManager.priorityList) + priorities := make([]string, n) + for i := 0; i < n; i++ { + priorities[i] = strconv.Itoa(i) + } + + target := index + priorityForm.Clear(true) + priorityForm.AddDropDown("Change priority to ->", priorities, index, func(priority string, selection int) { + target = selection + }) + priorityForm.AddButton("Update", func() { + if target != index { + seqManager.updatePriorityList(ctx, index, target) + } + priorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.AddButton("Cancel", func() { + priorityForm.Clear(true) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.AddButton("Remove", func() { + url := seqManager.priorityList[index] + delete(seqManager.prioritiesSet, url) + seqManager.updatePriorityList(ctx, index, 0) + seqManager.priorityList = seqManager.priorityList[1:] + + priorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + }) + priorityForm.SetFocus(0) + app.SetFocus(priorityForm) + }) + + nonPrioritySeqList.SetSelectedFunc(func(index int, name string, second_name string, shortcut rune) { + priorityForm.Clear(true) + + n := len(seqManager.priorityList) + priorities := make([]string, n+1) + for i := 0; i < n+1; i++ { + priorities[i] = 
strconv.Itoa(i) + } + + target := index + nonPriorityForm.Clear(true) + nonPriorityForm.AddDropDown("Set priority to ->", priorities, index, func(priority string, selection int) { + target = selection + }) + nonPriorityForm.AddButton("Update", func() { + key := seqManager.nonPriorityList[index] + seqManager.priorityList = append(seqManager.priorityList, key) + seqManager.prioritiesSet[key] = true + + index = len(seqManager.priorityList) - 1 + seqManager.updatePriorityList(ctx, index, target) + + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + if len(seqManager.nonPriorityList) > 0 { + app.SetFocus(nonPrioritySeqList) + } else { + app.SetFocus(prioritySeqList) + } + }) + nonPriorityForm.AddButton("Cancel", func() { + nonPriorityForm.Clear(true) + pages.SwitchToPage("Menu") + app.SetFocus(nonPrioritySeqList) + }) + nonPriorityForm.SetFocus(0) + app.SetFocus(nonPriorityForm) + }) + + // UI design + flex := tview.NewFlex() + priorityHeading := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("-----Priority List-----") + nonPriorityHeading := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("-----Not in priority list but online-----") + instructions := tview.NewTextView(). + SetTextColor(tcell.ColorYellow). + SetText("(r) to refresh\n(s) to save all changes\n(c) to switch between lists\n(a) to add sequencer\n(q) to quit\n(tab) to navigate") + + flex.SetDirection(tview.FlexRow). + AddItem(priorityHeading, 0, 1, false). + AddItem(tview.NewFlex(). + AddItem(prioritySeqList, 0, 2, true). + AddItem(priorityForm, 0, 3, true), 0, 12, true). + AddItem(nonPriorityHeading, 0, 1, false). + AddItem(tview.NewFlex(). + AddItem(nonPrioritySeqList, 0, 2, true). + AddItem(nonPriorityForm, 0, 3, true), 0, 12, true). 
+ AddItem(instructions, 0, 3, false).SetBorder(true) + + flex.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { + if event.Rune() == 114 { + seqManager.refreshAllLists(ctx) + priorityForm.Clear(true) + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + } else if event.Rune() == 115 { + seqManager.pushUpdates(ctx) + priorityForm.Clear(true) + nonPriorityForm.Clear(true) + seqManager.populateLists(ctx) + pages.SwitchToPage("Menu") + app.SetFocus(prioritySeqList) + } else if event.Rune() == 97 { + addSeqForm.Clear(true) + seqManager.addSeqPriorityForm(ctx) + pages.SwitchToPage("Add Sequencer") + } else if event.Rune() == 99 { + if prioritySeqList.HasFocus() || priorityForm.HasFocus() { + priorityForm.Clear(true) + app.SetFocus(nonPrioritySeqList) + } else { + nonPriorityForm.Clear(true) + app.SetFocus(prioritySeqList) + } + } else if event.Rune() == 113 { + app.Stop() + } + return event + }) + + pages.AddPage("Menu", flex, true, true) + pages.AddPage("Add Sequencer", addSeqForm, true, false) + + if err := app.SetRoot(pages, true).EnableMouse(true).Run(); err != nil { + panic(err) + } +} + +// updatePriorityList updates the list by changing the position of the sequencer at `index` to `target` +func (sm *manager) updatePriorityList(ctx context.Context, index int, target int) { + for i := index - 1; i >= target; i-- { + sm.priorityList[i], sm.priorityList[i+1] = sm.priorityList[i+1], sm.priorityList[i] + } + for i := index + 1; i <= target; i++ { + sm.priorityList[i], sm.priorityList[i-1] = sm.priorityList[i-1], sm.priorityList[i] + } + + urlList := []string{} + for url := range sm.livelinessSet { + if _, ok := sm.prioritiesSet[url]; !ok { + urlList = append(urlList, url) + } + } + sm.nonPriorityList = urlList +} + +// populateLists populates sequencers in the priority list and sequencers that are online but not in the priority list +func (sm *manager) populateLists(ctx context.Context) { + prioritySeqList.Clear() + chosen, err := sm.redisCoordinator.CurrentChosenSequencer(ctx) + if err != nil { + panic(err) + } + for index, seqURL := range sm.priorityList { + sec := "" + if seqURL == chosen { + sec = fmt.Sprintf(" %vchosen", emoji.LeftArrow) + } + status := fmt.Sprintf("(%d) %v ", index, emoji.RedCircle) + if _, ok := sm.livelinessSet[seqURL]; ok { + status = fmt.Sprintf("(%d) %v ", index, emoji.GreenCircle) + } + prioritySeqList.AddItem(status+seqURL+sec, "", rune(0), nil).SetSecondaryTextColor(tcell.ColorPurple) + } + + nonPrioritySeqList.Clear() + status := fmt.Sprintf("(-) %v ", emoji.GreenCircle) + for _, seqURL := range sm.nonPriorityList { + nonPrioritySeqList.AddItem(status+seqURL, "", rune(0), nil) + } +} + +// addSeqPriorityForm returns a form with fields to add a new sequencer to the priority list +func (sm *manager) addSeqPriorityForm(ctx context.Context) *tview.Form { + URL := "" + addSeqForm.AddInputField("Sequencer URL", "", 0, nil, func(url string) { + URL = url + }) + addSeqForm.AddButton("Cancel", func() { + priorityForm.Clear(true) + sm.populateLists(ctx) + pages.SwitchToPage("Menu") + }) + addSeqForm.AddButton("Add", func() { + // check if url is valid, i.e. it doesn't already exist in the priority list + if _, ok := sm.prioritiesSet[URL]; !ok && URL != "" { + sm.prioritiesSet[URL] = true + sm.priorityList = append(sm.priorityList, URL) + } + sm.populateLists(ctx) + pages.SwitchToPage("Menu") + }) + return addSeqForm +} + +// pushUpdates pushes the local changes to the Redis server +func (sm *manager) pushUpdates(ctx 
context.Context) { + err := sm.redisCoordinator.UpdatePriorities(ctx, sm.priorityList) + if err != nil { + log.Warn("Failed to push local changes to the priority list") + } + sm.refreshAllLists(ctx) +} + +// refreshAllLists gets the current status of all the lists displayed in the UI +func (sm *manager) refreshAllLists(ctx context.Context) { + priorityList, err := sm.redisCoordinator.GetPriorities(ctx) + if err != nil { + panic(err) + } + sm.priorityList = priorityList + sm.prioritiesSet = getMapfromlist(priorityList) + + livelinessList, err := sm.redisCoordinator.GetLiveliness(ctx) + if err != nil { + panic(err) + } + sm.livelinessSet = getMapfromlist(livelinessList) + + urlList := []string{} + for url := range sm.livelinessSet { + if _, ok := sm.prioritiesSet[url]; !ok { + urlList = append(urlList, url) + } + } + sm.nonPriorityList = urlList +} + +func getMapfromlist(list []string) map[string]bool { + mapping := make(map[string]bool) + for _, url := range list { + mapping[url] = true + } + return mapping +} diff --git a/cmd/util/keystore.go b/cmd/util/keystore.go index 56749f9722..52a18a42b5 100644 --- a/cmd/util/keystore.go +++ b/cmd/util/keystore.go @@ -79,7 +79,7 @@ func openKeystore(ks *keystore.KeyStore, description string, walletConfig *gener if !creatingNew && walletConfig.OnlyCreateKey { return nil, fmt.Errorf("wallet key already created, backup key (%s) and remove --%s.wallet.only-create-key to run normally", walletConfig.Pathname, description) } - passOpt := walletConfig.Password() + passOpt := walletConfig.Pwd() var password string if passOpt != nil { password = *passOpt diff --git a/cmd/util/keystore_test.go b/cmd/util/keystore_test.go index 17a0498d68..1ee579de28 100644 --- a/cmd/util/keystore_test.go +++ b/cmd/util/keystore_test.go @@ -29,7 +29,7 @@ func createWallet(t *testing.T, pathname string) { walletConf := genericconf.WalletConfigDefault walletConf.Pathname = pathname walletConf.OnlyCreateKey = true - walletConf.PasswordImpl = "foo" + walletConf.Password = "foo" testPassCalled := false testPass := func() (string, error) { @@ -69,7 +69,7 @@ func TestExistingKeystoreNoCreate(t *testing.T) { walletConf := genericconf.WalletConfigDefault walletConf.Pathname = pathname walletConf.OnlyCreateKey = true - walletConf.PasswordImpl = "foo" + walletConf.Password = "foo" testPassCalled := false testPass := func() (string, error) { diff --git a/contracts b/contracts index 33dd360abf..fbb26ac2f3 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 33dd360abf85b0ef8491d595cf9a28ee7ca37b8d +Subproject commit fbb26ac2f3e1cb55e938da304a7a8c9e82b1f7d8 diff --git a/das/aggregator.go b/das/aggregator.go index 33ce5ad489..e8cc0a3c25 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -82,10 +82,10 @@ func NewServiceDetails(service DataAvailabilityServiceWriter, pubKey blsSignatur } func NewAggregator(ctx context.Context, config DataAvailabilityConfig, services []ServiceDetails) (*Aggregator, error) { - if config.L1NodeURL == "none" { + if config.ParentChainNodeURL == "none" { return NewAggregatorWithSeqInboxCaller(config, services, nil) } - l1client, err := GetL1Client(ctx, config.L1ConnectionAttempts, config.L1NodeURL) + l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL) if err != nil { return nil, err } @@ -118,7 +118,7 @@ func NewAggregatorWithSeqInboxCaller( seqInboxCaller *bridgegen.SequencerInboxCaller, ) (*Aggregator, error) { - keysetHash, keysetBytes, err := KeysetHashFromServices(services, 
uint64(config.AggregatorConfig.AssumedHonest)) + keysetHash, keysetBytes, err := KeysetHashFromServices(services, uint64(config.RPCAggregator.AssumedHonest)) if err != nil { return nil, err } @@ -129,11 +129,11 @@ func NewAggregatorWithSeqInboxCaller( } return &Aggregator{ - config: config.AggregatorConfig, + config: config.RPCAggregator, services: services, requestTimeout: config.RequestTimeout, - requiredServicesForStore: len(services) + 1 - config.AggregatorConfig.AssumedHonest, - maxAllowedServiceStoreFailures: config.AggregatorConfig.AssumedHonest - 1, + requiredServicesForStore: len(services) + 1 - config.RPCAggregator.AssumedHonest, + maxAllowedServiceStoreFailures: config.RPCAggregator.AssumedHonest - 1, keysetHash: keysetHash, keysetBytes: keysetBytes, bpVerifier: bpVerifier, @@ -290,6 +290,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, cd.aggSignersMask = aggSignersMask certDetailsChan <- cd returned = true + if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet + storeFailures+1 > a.maxAllowedServiceStoreFailures { + log.Error("das.Aggregator: storing the batch data succeeded to enough DAS commitee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"") + } } else if storeFailures > a.maxAllowedServiceStoreFailures { cd := certDetails{} cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, BatchToDasFailed) diff --git a/das/aggregator_test.go b/das/aggregator_test.go index 1b6c60c675..776af3975b 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -34,10 +34,10 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ PrivKey: privKey, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } storageServices = append(storageServices, NewMemoryBackedStorageService(ctx)) @@ -49,7 +49,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { backends = append(backends, *details) } - aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{AggregatorConfig: AggregatorConfig{AssumedHonest: 1}, L1NodeURL: "none"}, backends) + aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1}, ParentChainNodeURL: "none"}, backends) Require(t, err) rawMsg := []byte("It's time for you to see the fnords.") @@ -187,10 +187,10 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ PrivKey: privKey, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } storageServices = append(storageServices, NewMemoryBackedStorageService(ctx)) @@ -205,9 +205,9 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { aggregator, err := NewAggregator( ctx, DataAvailabilityConfig{ - AggregatorConfig: AggregatorConfig{AssumedHonest: assumedHonest}, - L1NodeURL: "none", - RequestTimeout: time.Millisecond * 2000, + RPCAggregator: AggregatorConfig{AssumedHonest: assumedHonest}, + ParentChainNodeURL: "none", + RequestTimeout: time.Millisecond * 2000, }, backends) Require(t, err) diff --git a/das/das.go b/das/das.go index a5d5c8d560..208a12cc83 100644 --- a/das/das.go +++ 
b/das/das.go @@ -40,22 +40,22 @@ type DataAvailabilityConfig struct { RequestTimeout time.Duration `koanf:"request-timeout"` - LocalCacheConfig BigCacheConfig `koanf:"local-cache"` - RedisCacheConfig RedisConfig `koanf:"redis-cache"` + LocalCache BigCacheConfig `koanf:"local-cache"` + RedisCache RedisConfig `koanf:"redis-cache"` - LocalDBStorageConfig LocalDBStorageConfig `koanf:"local-db-storage"` - LocalFileStorageConfig LocalFileStorageConfig `koanf:"local-file-storage"` - S3StorageServiceConfig S3StorageServiceConfig `koanf:"s3-storage"` - IpfsStorageServiceConfig IpfsStorageServiceConfig `koanf:"ipfs-storage"` - RegularSyncStorageConfig RegularSyncStorageConfig `koanf:"regular-sync-storage"` + LocalDBStorage LocalDBStorageConfig `koanf:"local-db-storage"` + LocalFileStorage LocalFileStorageConfig `koanf:"local-file-storage"` + S3Storage S3StorageServiceConfig `koanf:"s3-storage"` + IpfsStorage IpfsStorageServiceConfig `koanf:"ipfs-storage"` + RegularSyncStorage RegularSyncStorageConfig `koanf:"regular-sync-storage"` - KeyConfig KeyConfig `koanf:"key"` + Key KeyConfig `koanf:"key"` - AggregatorConfig AggregatorConfig `koanf:"rpc-aggregator"` - RestfulClientAggregatorConfig RestfulClientAggregatorConfig `koanf:"rest-aggregator"` + RPCAggregator AggregatorConfig `koanf:"rpc-aggregator"` + RestAggregator RestfulClientAggregatorConfig `koanf:"rest-aggregator"` - L1NodeURL string `koanf:"parent-chain-node-url"` - L1ConnectionAttempts int `koanf:"parent-chain-connection-attempts"` + ParentChainNodeURL string `koanf:"parent-chain-node-url"` + ParentChainConnectionAttempts int `koanf:"parent-chain-connection-attempts"` SequencerInboxAddress string `koanf:"sequencer-inbox-address"` ExtraSignatureCheckingPublicKey string `koanf:"extra-signature-checking-public-key"` @@ -66,8 +66,8 @@ type DataAvailabilityConfig struct { var DefaultDataAvailabilityConfig = DataAvailabilityConfig{ RequestTimeout: 5 * time.Second, Enable: false, - RestfulClientAggregatorConfig: DefaultRestfulClientAggregatorConfig, - L1ConnectionAttempts: 15, + RestAggregator: DefaultRestfulClientAggregatorConfig, + ParentChainConnectionAttempts: 15, PanicOnError: false, } @@ -132,8 +132,8 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { IpfsStorageServiceConfigAddOptions(prefix+".ipfs-storage", f) RestfulClientAggregatorConfigAddOptions(prefix+".rest-aggregator", f) - f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.L1NodeURL, "URL for L1 node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") - f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.L1ConnectionAttempts, "layer 1 RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's L1 configuration is used") + f.String(prefix+".parent-chain-node-url", DefaultDataAvailabilityConfig.ParentChainNodeURL, "URL for L1 node, only used in standalone daserver; when running as part of a node that node's L1 configuration is used") + f.Int(prefix+".parent-chain-connection-attempts", DefaultDataAvailabilityConfig.ParentChainConnectionAttempts, "layer 1 RPC connection attempts (spaced out at least 1 second per attempt, 0 to retry infinitely), only used in standalone daserver; when running as part of a node that node's L1 configuration is used") f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "L1 
address of SequencerInbox contract") } diff --git a/das/das_test.go b/das/das_test.go index 7318afac19..416744535b 100644 --- a/das/das_test.go +++ b/das/das_test.go @@ -32,18 +32,18 @@ func testDASStoreRetrieveMultipleInstances(t *testing.T, storageType string) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: LocalDBStorageConfig{ + LocalDBStorage: LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } var syncFromStorageServicesFirst []*IterableStorageService @@ -124,18 +124,18 @@ func testDASMissingMessage(t *testing.T, storageType string) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: LocalDBStorageConfig{ + LocalDBStorage: LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, - L1NodeURL: "none", + ParentChainNodeURL: "none", } var syncFromStorageServices []*IterableStorageService diff --git a/das/db_storage_service.go b/das/db_storage_service.go index fb89b1cf30..b9af530b9e 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -20,11 +20,11 @@ import ( ) type LocalDBStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultLocalDBStorageConfig = LocalDBStorageConfig{} @@ -33,8 +33,8 @@ func LocalDBStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalDBStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a database on the local filesystem") f.String(prefix+".data-dir", DefaultLocalDBStorageConfig.DataDir, "directory in which to store the database") f.Bool(prefix+".discard-after-timeout", DefaultLocalDBStorageConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageServices, "enable db storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageServices, "enable db storage to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultLocalDBStorageConfig.SyncFromStorageService, "enable db storage to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultLocalDBStorageConfig.SyncToStorageService, "enable db storage to be used as a sink for regular sync storage") } type DBStorageService struct { diff --git a/das/factory.go b/das/factory.go index 96df5b474d..0e6b292005 100644 --- a/das/factory.go +++ b/das/factory.go @@ -27,59 +27,59 @@ func CreatePersistentStorageService( ) (StorageService, *LifecycleManager, error) { storageServices := 
make([]StorageService, 0, 10) var lifecycleManager LifecycleManager - if config.LocalDBStorageConfig.Enable { - s, err := NewDBStorageService(ctx, config.LocalDBStorageConfig.DataDir, config.LocalDBStorageConfig.DiscardAfterTimeout) + if config.LocalDBStorage.Enable { + s, err := NewDBStorageService(ctx, config.LocalDBStorage.DataDir, config.LocalDBStorage.DiscardAfterTimeout) if err != nil { return nil, nil, err } - if config.LocalDBStorageConfig.SyncFromStorageServices { + if config.LocalDBStorage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.LocalDBStorageConfig.SyncToStorageServices { + if config.LocalDBStorage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } lifecycleManager.Register(s) storageServices = append(storageServices, s) } - if config.LocalFileStorageConfig.Enable { - s, err := NewLocalFileStorageService(config.LocalFileStorageConfig.DataDir) + if config.LocalFileStorage.Enable { + s, err := NewLocalFileStorageService(config.LocalFileStorage.DataDir) if err != nil { return nil, nil, err } - if config.LocalFileStorageConfig.SyncFromStorageServices { + if config.LocalFileStorage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.LocalFileStorageConfig.SyncToStorageServices { + if config.LocalFileStorage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } lifecycleManager.Register(s) storageServices = append(storageServices, s) } - if config.S3StorageServiceConfig.Enable { - s, err := NewS3StorageService(config.S3StorageServiceConfig) + if config.S3Storage.Enable { + s, err := NewS3StorageService(config.S3Storage) if err != nil { return nil, nil, err } lifecycleManager.Register(s) - if config.S3StorageServiceConfig.SyncFromStorageServices { + if config.S3Storage.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(s)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) s = iterableStorageService } - if config.S3StorageServiceConfig.SyncToStorageServices { + if config.S3Storage.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, s) } storageServices = append(storageServices, s) } - if config.IpfsStorageServiceConfig.Enable { - s, err := NewIpfsStorageService(ctx, config.IpfsStorageServiceConfig) + if config.IpfsStorage.Enable { + s, err := NewIpfsStorageService(ctx, config.IpfsStorage) if err != nil { return nil, nil, err } @@ -114,23 +114,23 @@ func WrapStorageWithCache( // Enable caches, Redis and (local) BigCache. Local is the outermost, so it will be tried first. 
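// For illustration only (not part of this patch): the renamed RedisCache and LocalCache fields
// drive how WrapStorageWithCache layers the caches. The backing store is wrapped by the Redis
// cache and then by the in-process BigCache, so a read tries the local cache first, then Redis,
// then the underlying storage. A minimal sketch under those assumptions, where newBackingStore
// is a hypothetical placeholder for whatever CreatePersistentStorageService returned:
//
//	store := newBackingStore()                                        // local DB / files / S3 / IPFS
//	store, err = NewRedisStorageService(config.RedisCache, store)     // middle layer
//	store, err = NewBigCacheStorageService(config.LocalCache, store)  // outermost, tried first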
var err error - if config.RedisCacheConfig.Enable { - storageService, err = NewRedisStorageService(config.RedisCacheConfig, storageService) + if config.RedisCache.Enable { + storageService, err = NewRedisStorageService(config.RedisCache, storageService) lifecycleManager.Register(storageService) if err != nil { return nil, err } - if config.RedisCacheConfig.SyncFromStorageServices { + if config.RedisCache.SyncFromStorageService { iterableStorageService := NewIterableStorageService(ConvertStorageServiceToIterationCompatibleStorageService(storageService)) *syncFromStorageServices = append(*syncFromStorageServices, iterableStorageService) storageService = iterableStorageService } - if config.RedisCacheConfig.SyncToStorageServices { + if config.RedisCache.SyncToStorageService { *syncToStorageServices = append(*syncToStorageServices, storageService) } } - if config.LocalCacheConfig.Enable { - storageService, err = NewBigCacheStorageService(config.LocalCacheConfig, storageService) + if config.LocalCache.Enable { + storageService, err = NewBigCacheStorageService(config.LocalCache, storageService) lifecycleManager.Register(storageService) if err != nil { return nil, err @@ -151,11 +151,11 @@ func CreateBatchPosterDAS( } // Check config requirements - if !config.AggregatorConfig.Enable || !config.RestfulClientAggregatorConfig.Enable { + if !config.RPCAggregator.Enable || !config.RestAggregator.Enable { return nil, nil, nil, errors.New("--node.data-availability.rpc-aggregator.enable and rest-aggregator.enable must be set when running a Batch Poster in AnyTrust mode") } - if config.IpfsStorageServiceConfig.Enable { + if config.IpfsStorage.Enable { return nil, nil, nil, errors.New("--node.data-availability.ipfs-storage.enable may not be set when running a Nitro AnyTrust node in Batch Poster mode") } // Done checking config requirements @@ -173,7 +173,7 @@ func CreateBatchPosterDAS( } } - restAgg, err := NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, nil, err } @@ -200,10 +200,10 @@ func CreateDAComponentsForDaserver( } // Check config requirements - if !config.LocalDBStorageConfig.Enable && - !config.LocalFileStorageConfig.Enable && - !config.S3StorageServiceConfig.Enable && - !config.IpfsStorageServiceConfig.Enable { + if !config.LocalDBStorage.Enable && + !config.LocalFileStorage.Enable && + !config.S3Storage.Enable && + !config.IpfsStorage.Enable { return nil, nil, nil, nil, errors.New("At least one of --data-availability.(local-db-storage|local-file-storage|s3-storage|ipfs-storage) must be enabled.") } // Done checking config requirements @@ -222,15 +222,15 @@ func CreateDAComponentsForDaserver( // The REST aggregator is used as the fallback if requested data is not present // in the storage service. 
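// Rough sketch of the read path this fallback gives a daserver (hypothetical wiring, assuming
// the usual GetByHash-style reader methods): the local storage service is consulted first and
// the REST aggregator only on a miss.
//
//	data, err := storageService.GetByHash(ctx, hash)
//	if err != nil {
//	    data, err = restAgg.GetByHash(ctx, hash) // fall back to the RESTful endpoints
//	}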
- if config.RestfulClientAggregatorConfig.Enable { - restAgg, err := NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + if config.RestAggregator.Enable { + restAgg, err := NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, nil, nil, err } restAgg.Start(ctx) dasLifecycleManager.Register(restAgg) - syncConf := &config.RestfulClientAggregatorConfig.SyncToStorageConfig + syncConf := &config.RestAggregator.SyncToStorage var retentionPeriodSeconds uint64 if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { retentionPeriodSeconds = math.MaxUint64 @@ -266,7 +266,7 @@ func CreateDAComponentsForDaserver( var daReader DataAvailabilityServiceReader = storageService var daHealthChecker DataAvailabilityServiceHealthChecker = storageService - if config.KeyConfig.KeyDir != "" || config.KeyConfig.PrivKey != "" { + if config.Key.KeyDir != "" || config.Key.PrivKey != "" { var seqInboxCaller *bridgegen.SequencerInboxCaller if seqInboxAddress != nil { seqInbox, err := bridgegen.NewSequencerInbox(*seqInboxAddress, (*l1Reader).Client()) @@ -280,7 +280,7 @@ func CreateDAComponentsForDaserver( seqInboxCaller = nil } - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() if err != nil { return nil, nil, nil, nil, err } @@ -296,8 +296,8 @@ func CreateDAComponentsForDaserver( } } - if config.RegularSyncStorageConfig.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { - regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorageConfig) + if config.RegularSyncStorage.Enable && len(syncFromStorageServices) != 0 && len(syncToStorageServices) != 0 { + regularlySyncStorage := NewRegularlySyncStorage(syncFromStorageServices, syncToStorageServices, config.RegularSyncStorage) regularlySyncStorage.Start(ctx) } @@ -322,15 +322,15 @@ func CreateDAReaderForNode( } // Check config requirements - if config.AggregatorConfig.Enable { + if config.RPCAggregator.Enable { return nil, nil, errors.New("node.data-availability.rpc-aggregator is only for Batch Poster mode") } - if !config.RestfulClientAggregatorConfig.Enable && !config.IpfsStorageServiceConfig.Enable { + if !config.RestAggregator.Enable && !config.IpfsStorage.Enable { return nil, nil, fmt.Errorf("--node.data-availability.enable was set but neither of --node.data-availability.(rest-aggregator|ipfs-storage) were enabled. 
When running a Nitro Anytrust node in non-Batch Poster mode, some way to get the batch data is required.") } - if config.RestfulClientAggregatorConfig.SyncToStorageConfig.Eager { + if config.RestAggregator.SyncToStorage.Eager { return nil, nil, errors.New("--node.data-availability.rest-aggregator.sync-to-storage.eager can't be used with a Nitro node, only lazy syncing can be used.") } // Done checking config requirements @@ -341,9 +341,9 @@ func CreateDAReaderForNode( } var daReader DataAvailabilityServiceReader - if config.RestfulClientAggregatorConfig.Enable { + if config.RestAggregator.Enable { var restAgg *SimpleDASReaderAggregator - restAgg, err = NewRestfulClientAggregator(ctx, &config.RestfulClientAggregatorConfig) + restAgg, err = NewRestfulClientAggregator(ctx, &config.RestAggregator) if err != nil { return nil, nil, err } @@ -351,7 +351,7 @@ func CreateDAReaderForNode( dasLifecycleManager.Register(restAgg) if storageService != nil { - syncConf := &config.RestfulClientAggregatorConfig.SyncToStorageConfig + syncConf := &config.RestAggregator.SyncToStorage var retentionPeriodSeconds uint64 if uint64(syncConf.RetentionPeriod) == math.MaxUint64 { retentionPeriodSeconds = math.MaxUint64 diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 9fd831f480..5fa5306e39 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -22,10 +22,10 @@ import ( ) type LocalFileStorageConfig struct { - Enable bool `koanf:"enable"` - DataDir string `koanf:"data-dir"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + DataDir string `koanf:"data-dir"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ @@ -35,8 +35,8 @@ var DefaultLocalFileStorageConfig = LocalFileStorageConfig{ func LocalFileStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultLocalFileStorageConfig.Enable, "enable storage/retrieval of sequencer batch data from a directory of files, one per batch") f.String(prefix+".data-dir", DefaultLocalFileStorageConfig.DataDir, "local data directory") - f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageServices, "enable local storage to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageServices, "enable local storage to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultLocalFileStorageConfig.SyncFromStorageService, "enable local storage to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultLocalFileStorageConfig.SyncToStorageService, "enable local storage to be used as a sink for regular sync storage") } type LocalFileStorageService struct { diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index a005c70a44..3449a8e78c 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -24,27 +24,27 @@ import ( ) type RedisConfig struct { - Enable bool `koanf:"enable"` - RedisUrl string `koanf:"redis-url"` - Expiration time.Duration `koanf:"redis-expiration"` - KeyConfig string `koanf:"redis-key-config"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool 
`koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + Url string `koanf:"url"` + Expiration time.Duration `koanf:"expiration"` + KeyConfig string `koanf:"key-config"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultRedisConfig = RedisConfig{ - RedisUrl: "", + Url: "", Expiration: time.Hour, KeyConfig: "", } func RedisConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultRedisConfig.Enable, "enable Redis caching of sequencer batch data") - f.String(prefix+".redis-url", DefaultRedisConfig.RedisUrl, "Redis url") - f.Duration(prefix+".redis-expiration", DefaultRedisConfig.Expiration, "Redis expiration") - f.String(prefix+".redis-key-config", DefaultRedisConfig.KeyConfig, "Redis key config") - f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageServices, "enable Redis to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageServices, "enable Redis to be used as a sink for regular sync storage") + f.String(prefix+".url", DefaultRedisConfig.Url, "Redis url") + f.Duration(prefix+".expiration", DefaultRedisConfig.Expiration, "Redis expiration") + f.String(prefix+".key-config", DefaultRedisConfig.KeyConfig, "Redis key config") + f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable Redis to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable Redis to be used as a sink for regular sync storage") } type RedisStorageService struct { @@ -55,7 +55,7 @@ type RedisStorageService struct { } func NewRedisStorageService(redisConfig RedisConfig, baseStorageService StorageService) (StorageService, error) { - redisClient, err := redisutil.RedisClientFromURL(redisConfig.RedisUrl) + redisClient, err := redisutil.RedisClientFromURL(redisConfig.Url) if err != nil { return nil, err } diff --git a/das/redis_storage_service_test.go b/das/redis_storage_service_test.go index 2481358cf6..55f3ecd82c 100644 --- a/das/redis_storage_service_test.go +++ b/das/redis_storage_service_test.go @@ -23,7 +23,7 @@ func TestRedisStorageService(t *testing.T) { redisService, err := NewRedisStorageService( RedisConfig{ Enable: true, - RedisUrl: "redis://" + server.Addr(), + Url: "redis://" + server.Addr(), Expiration: time.Hour, KeyConfig: "b561f5d5d98debc783aa8a1472d67ec3bcd532a1c8d95e5cb23caa70c649f7c9", }, baseStorageService) @@ -75,7 +75,7 @@ func TestRedisStorageService(t *testing.T) { redisServiceWithEmptyBaseStorage, err := NewRedisStorageService( RedisConfig{ Enable: true, - RedisUrl: "redis://" + server.Addr(), + Url: "redis://" + server.Addr(), Expiration: time.Hour, KeyConfig: "b561f5d5d98debc783aa8a1472d67ec3bcd532a1c8d95e5cb23caa70c649f7c9", }, emptyBaseStorageService) diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index cc455250d3..134c4229c8 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -28,7 +28,7 @@ type BackendConfig struct { } func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggregator, error) { - services, err := ParseServices(config.AggregatorConfig) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func NewRPCAggregator(ctx context.Context, config DataAvailabilityConfig) (*Aggr } func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client 
arbutil.L1Interface, seqInboxAddress common.Address) (*Aggregator, error) { - services, err := ParseServices(config.AggregatorConfig) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } @@ -44,7 +44,7 @@ func NewRPCAggregatorWithL1Info(config DataAvailabilityConfig, l1client arbutil. } func NewRPCAggregatorWithSeqInboxCaller(config DataAvailabilityConfig, seqInboxCaller *bridgegen.SequencerInboxCaller) (*Aggregator, error) { - services, err := ParseServices(config.AggregatorConfig) + services, err := ParseServices(config.RPCAggregator) if err != nil { return nil, err } diff --git a/das/rpc_test.go b/das/rpc_test.go index 6dcb8457c2..044ba597be 100644 --- a/das/rpc_test.go +++ b/das/rpc_test.go @@ -35,15 +35,15 @@ func TestRPC(t *testing.T) { config := DataAvailabilityConfig{ Enable: true, - KeyConfig: KeyConfig{ + Key: KeyConfig{ KeyDir: keyDir, }, - LocalFileStorageConfig: LocalFileStorageConfig{ + LocalFileStorage: LocalFileStorageConfig{ Enable: true, DataDir: dataDir, }, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } var syncFromStorageServices []*IterableStorageService @@ -51,7 +51,7 @@ func TestRPC(t *testing.T) { storageService, lifecycleManager, err := CreatePersistentStorageService(ctx, &config, &syncFromStorageServices, &syncToStorageServices) testhelpers.RequireImpl(t, err) defer lifecycleManager.StopAndWaitUntil(time.Second) - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() testhelpers.RequireImpl(t, err) localDas, err := NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, "") testhelpers.RequireImpl(t, err) @@ -71,7 +71,7 @@ func TestRPC(t *testing.T) { backendsJsonByte, err := json.Marshal([]BackendConfig{beConfig}) testhelpers.RequireImpl(t, err) aggConf := DataAvailabilityConfig{ - AggregatorConfig: AggregatorConfig{ + RPCAggregator: AggregatorConfig{ AssumedHonest: 1, Backends: string(backendsJsonByte), }, diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 18a9ce1475..1a3ae94114 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -34,15 +34,15 @@ type S3Downloader interface { } type S3StorageServiceConfig struct { - Enable bool `koanf:"enable"` - AccessKey string `koanf:"access-key"` - Bucket string `koanf:"bucket"` - ObjectPrefix string `koanf:"object-prefix"` - Region string `koanf:"region"` - SecretKey string `koanf:"secret-key"` - DiscardAfterTimeout bool `koanf:"discard-after-timeout"` - SyncFromStorageServices bool `koanf:"sync-from-storage-service"` - SyncToStorageServices bool `koanf:"sync-to-storage-service"` + Enable bool `koanf:"enable"` + AccessKey string `koanf:"access-key"` + Bucket string `koanf:"bucket"` + ObjectPrefix string `koanf:"object-prefix"` + Region string `koanf:"region"` + SecretKey string `koanf:"secret-key"` + DiscardAfterTimeout bool `koanf:"discard-after-timeout"` + SyncFromStorageService bool `koanf:"sync-from-storage-service"` + SyncToStorageService bool `koanf:"sync-to-storage-service"` } var DefaultS3StorageServiceConfig = S3StorageServiceConfig{} @@ -55,8 +55,8 @@ func S3ConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".region", DefaultS3StorageServiceConfig.Region, "S3 region") f.String(prefix+".secret-key", DefaultS3StorageServiceConfig.SecretKey, "S3 secret key") f.Bool(prefix+".discard-after-timeout", DefaultS3StorageServiceConfig.DiscardAfterTimeout, "discard data after its expiry timeout") - 
f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageServices, "enable s3 to be used as a source for regular sync storage") - f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageServices, "enable s3 to be used as a sink for regular sync storage") + f.Bool(prefix+".sync-from-storage-service", DefaultRedisConfig.SyncFromStorageService, "enable s3 to be used as a source for regular sync storage") + f.Bool(prefix+".sync-to-storage-service", DefaultRedisConfig.SyncToStorageService, "enable s3 to be used as a sink for regular sync storage") } type S3StorageService struct { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 1a244ab640..5d612574bf 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -86,14 +86,14 @@ type SignAfterStoreDASWriter struct { } func NewSignAfterStoreDASWriter(ctx context.Context, config DataAvailabilityConfig, storageService StorageService) (*SignAfterStoreDASWriter, error) { - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() if err != nil { return nil, err } - if config.L1NodeURL == "none" { + if config.ParentChainNodeURL == "none" { return NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, nil, storageService, config.ExtraSignatureCheckingPublicKey) } - l1client, err := GetL1Client(ctx, config.L1ConnectionAttempts, config.L1NodeURL) + l1client, err := GetL1Client(ctx, config.ParentChainConnectionAttempts, config.ParentChainNodeURL) if err != nil { return nil, err } diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index b2499b680a..eb82a33837 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -25,28 +25,28 @@ import ( // RestfulDasClients, so the configuration and factory function are given more // specific names. 
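// Note on the renames in the struct below: only the Go field names drop their "...Config"
// suffix; the koanf tags are unchanged, so existing flags and config keys such as
// --node.data-availability.rest-aggregator.sync-to-storage.* keep working. A minimal sketch of
// how the tag, not the field name, drives the mapping (assuming a koanf instance k that has
// already loaded the flags):
//
//	var cfg RestfulClientAggregatorConfig
//	err := k.Unmarshal("node.data-availability.rest-aggregator", &cfg)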
type RestfulClientAggregatorConfig struct { - Enable bool `koanf:"enable"` - Urls []string `koanf:"urls"` - OnlineUrlList string `koanf:"online-url-list"` - OnlineUrlListFetchInterval time.Duration `koanf:"online-url-list-fetch-interval"` - Strategy string `koanf:"strategy"` - StrategyUpdateInterval time.Duration `koanf:"strategy-update-interval"` - WaitBeforeTryNext time.Duration `koanf:"wait-before-try-next"` - MaxPerEndpointStats int `koanf:"max-per-endpoint-stats"` - SimpleExploreExploitStrategyConfig SimpleExploreExploitStrategyConfig `koanf:"simple-explore-exploit-strategy"` - SyncToStorageConfig SyncToStorageConfig `koanf:"sync-to-storage"` + Enable bool `koanf:"enable"` + Urls []string `koanf:"urls"` + OnlineUrlList string `koanf:"online-url-list"` + OnlineUrlListFetchInterval time.Duration `koanf:"online-url-list-fetch-interval"` + Strategy string `koanf:"strategy"` + StrategyUpdateInterval time.Duration `koanf:"strategy-update-interval"` + WaitBeforeTryNext time.Duration `koanf:"wait-before-try-next"` + MaxPerEndpointStats int `koanf:"max-per-endpoint-stats"` + SimpleExploreExploitStrategy SimpleExploreExploitStrategyConfig `koanf:"simple-explore-exploit-strategy"` + SyncToStorage SyncToStorageConfig `koanf:"sync-to-storage"` } var DefaultRestfulClientAggregatorConfig = RestfulClientAggregatorConfig{ - Urls: []string{}, - OnlineUrlList: "", - OnlineUrlListFetchInterval: 1 * time.Hour, - Strategy: "simple-explore-exploit", - StrategyUpdateInterval: 10 * time.Second, - WaitBeforeTryNext: 2 * time.Second, - MaxPerEndpointStats: 20, - SimpleExploreExploitStrategyConfig: DefaultSimpleExploreExploitStrategyConfig, - SyncToStorageConfig: DefaultSyncToStorageConfig, + Urls: []string{}, + OnlineUrlList: "", + OnlineUrlListFetchInterval: 1 * time.Hour, + Strategy: "simple-explore-exploit", + StrategyUpdateInterval: 10 * time.Second, + WaitBeforeTryNext: 2 * time.Second, + MaxPerEndpointStats: 20, + SimpleExploreExploitStrategy: DefaultSimpleExploreExploitStrategyConfig, + SyncToStorage: DefaultSyncToStorageConfig, } type SimpleExploreExploitStrategyConfig struct { @@ -120,8 +120,8 @@ func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggreg switch strings.ToLower(config.Strategy) { case "simple-explore-exploit": a.strategy = &simpleExploreExploitStrategy{ - exploreIterations: uint32(config.SimpleExploreExploitStrategyConfig.ExploreIterations), - exploitIterations: uint32(config.SimpleExploreExploitStrategyConfig.ExploitIterations), + exploreIterations: uint32(config.SimpleExploreExploitStrategy.ExploreIterations), + exploitIterations: uint32(config.SimpleExploreExploitStrategy.ExploitIterations), } case "testing-sequential": a.strategy = &testingSequentialStrategy{} diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 8a4115514d..91f2e522a7 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -57,32 +57,32 @@ func init() { } type SyncToStorageConfig struct { - CheckAlreadyExists bool `koanf:"check-already-exists"` - Eager bool `koanf:"eager"` - EagerLowerBoundBlock uint64 `koanf:"eager-lower-bound-block"` - RetentionPeriod time.Duration `koanf:"retention-period"` - DelayOnError time.Duration `koanf:"delay-on-error"` - IgnoreWriteErrors bool `koanf:"ignore-write-errors"` - L1BlocksPerRead uint64 `koanf:"parent-chain-blocks-per-read"` - StateDir string `koanf:"state-dir"` + CheckAlreadyExists bool `koanf:"check-already-exists"` + Eager bool `koanf:"eager"` + EagerLowerBoundBlock uint64 
`koanf:"eager-lower-bound-block"` + RetentionPeriod time.Duration `koanf:"retention-period"` + DelayOnError time.Duration `koanf:"delay-on-error"` + IgnoreWriteErrors bool `koanf:"ignore-write-errors"` + ParentChainBlocksPerRead uint64 `koanf:"parent-chain-blocks-per-read"` + StateDir string `koanf:"state-dir"` } var DefaultSyncToStorageConfig = SyncToStorageConfig{ - CheckAlreadyExists: true, - Eager: false, - EagerLowerBoundBlock: 0, - RetentionPeriod: time.Duration(math.MaxInt64), - DelayOnError: time.Second, - IgnoreWriteErrors: true, - L1BlocksPerRead: 100, - StateDir: "", + CheckAlreadyExists: true, + Eager: false, + EagerLowerBoundBlock: 0, + RetentionPeriod: time.Duration(math.MaxInt64), + DelayOnError: time.Second, + IgnoreWriteErrors: true, + ParentChainBlocksPerRead: 100, + StateDir: "", } func SyncToStorageConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".check-already-exists", DefaultSyncToStorageConfig.CheckAlreadyExists, "check if the data already exists in this DAS's storage. Must be disabled for fast sync with an IPFS backend") f.Bool(prefix+".eager", DefaultSyncToStorageConfig.Eager, "eagerly sync batch data to this DAS's storage from the rest endpoints, using L1 as the index of batch data hashes; otherwise only sync lazily") f.Uint64(prefix+".eager-lower-bound-block", DefaultSyncToStorageConfig.EagerLowerBoundBlock, "when eagerly syncing, start indexing forward from this L1 block. Only used if there is no sync state") - f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.L1BlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") + f.Uint64(prefix+".parent-chain-blocks-per-read", DefaultSyncToStorageConfig.ParentChainBlocksPerRead, "when eagerly syncing, max l1 blocks to read per poll") f.Duration(prefix+".retention-period", DefaultSyncToStorageConfig.RetentionPeriod, "period to retain synced data (defaults to forever)") f.Duration(prefix+".delay-on-error", DefaultSyncToStorageConfig.DelayOnError, "time to wait if encountered an error before retrying") f.Bool(prefix+".ignore-write-errors", DefaultSyncToStorageConfig.IgnoreWriteErrors, "log only on failures to write when syncing; otherwise treat it as an error") @@ -348,9 +348,9 @@ func (s *l1SyncService) readMore(ctx context.Context) error { } } } - if highBlockNr > s.lowBlockNr+s.config.L1BlocksPerRead { + if highBlockNr > s.lowBlockNr+s.config.ParentChainBlocksPerRead { s.catchingUp = true - highBlockNr = s.lowBlockNr + s.config.L1BlocksPerRead + highBlockNr = s.lowBlockNr + s.config.ParentChainBlocksPerRead if finalizedHighBlockNr > highBlockNr { finalizedHighBlockNr = highBlockNr } diff --git a/go-ethereum b/go-ethereum index d312afd03b..3f2e789b38 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit d312afd03bba77aa2b4ea36e80b7308cd6528e80 +Subproject commit 3f2e789b3857ccdd647c319e16f1a00805d1d6bd diff --git a/go.mod b/go.mod index 5adfd19388..cc31c4a23d 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/ethereum/go-ethereum v1.10.26 + github.com/fatih/structtag v1.2.0 github.com/google/go-cmp v0.5.9 github.com/hashicorp/golang-lru/v2 v2.0.1 github.com/ipfs/go-cid v0.3.2 @@ -32,6 +33,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/wealdtech/go-merkletree v1.0.0 golang.org/x/term v0.6.0 + golang.org/x/tools v0.7.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -86,11 +88,14 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect 
github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect + github.com/enescakir/emoji v1.0.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.1 // indirect + github.com/gdamore/encoding v1.0.0 // indirect + github.com/gdamore/tcell/v2 v2.6.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -188,6 +193,7 @@ require ( github.com/libp2p/go-reuseport v0.2.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect github.com/libp2p/zeroconf/v2 v2.2.0 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/miekg/dns v1.1.50 // indirect @@ -224,6 +230,8 @@ require ( github.com/quic-go/webtransport-go v0.5.2 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rhnvrm/simples3 v0.6.1 // indirect + github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 // indirect + github.com/rivo/uniseg v0.4.3 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect @@ -257,7 +265,6 @@ require ( go4.org v0.0.0-20200411211856-f5505b9728dd // indirect golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/tools v0.7.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect google.golang.org/grpc v1.46.0 // indirect @@ -281,7 +288,6 @@ require ( github.com/VictoriaMetrics/fastcache v1.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect github.com/go-ole/go-ole v1.2.1 // indirect @@ -298,7 +304,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mitchellh/mapstructure v1.4.2 github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect @@ -312,7 +318,7 @@ require ( golang.org/x/crypto v0.6.0 golang.org/x/net v0.8.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 + golang.org/x/sys v0.7.0 golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/go.sum b/go.sum index 58155db124..4362d4b01d 100644 --- a/go.sum +++ b/go.sum @@ -302,7 +302,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= @@ -310,6 +309,8 @@ github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/ github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= +github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -326,6 +327,8 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:Jp github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -348,6 +351,10 @@ github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZ github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell/v2 v2.6.0 h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg= +github.com/gdamore/tcell/v2 v2.6.0/go.mod h1:be9omFATkdr0D9qewWW3d+MEvl5dha+Etb5y65J2H8Y= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= @@ -1140,6 +1147,8 @@ github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod 
h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1174,6 +1183,8 @@ github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= +github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -1443,6 +1454,11 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rhnvrm/simples3 v0.6.1 h1:H0DJwybR6ryQE+Odi9eqkHuzjYAeJgtGcGtuBwOhsH8= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 h1:ZyM/+FYnpbZsFWuCohniM56kRoHRB4r5EuIzXEYkpxo= +github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703/go.mod h1:nVwGv4MP47T0jvlk7KuTTjjuSmrGO4JF0iaiNt4bufE= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1631,6 +1647,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1774,6 +1791,7 @@ golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1835,6 +1853,7 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1857,6 +1876,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1948,13 +1968,20 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys 
v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1966,6 +1993,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2035,6 +2063,7 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/linter/koanf/handlers.go b/linter/koanf/handlers.go new file mode 100644 index 0000000000..5826004014 --- /dev/null +++ b/linter/koanf/handlers.go @@ -0,0 +1,227 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/fatih/structtag" + "golang.org/x/tools/go/analysis" +) + +// handleComposite tracks use of fields in composite literals. +// E.g. `Config{A: 1, B: 2, C: 3}` will increase counters of fields A,B and C. +func handleComposite(pass *analysis.Pass, cl *ast.CompositeLit, cnt map[string]int) { + id, ok := cl.Type.(*ast.Ident) + if !ok { + return + } + for _, e := range cl.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + if ki, ok := kv.Key.(*ast.Ident); ok { + fi := pass.TypesInfo.Types[id].Type.String() + "." + ki.Name + cnt[normalizeID(pass, fi)]++ + } + } + } +} + +// handleSelector handles selector expression recursively, that is an expression: +// a.B.C.D will update counter for fields: a.B.C.D, a.B.C and a.B. +// It updates counters map in place, increasing corresponding identifiers by +// increaseBy amount. +func handleSelector(pass *analysis.Pass, se *ast.SelectorExpr, increaseBy int, cnt map[string]int) string { + if e, ok := se.X.(*ast.SelectorExpr); ok { + // Full field identifier, including package name. + fi := pass.TypesInfo.Types[e].Type.String() + "." 
+ se.Sel.Name
+		cnt[normalizeID(pass, fi)] += increaseBy
+		prefix := handleSelector(pass, e, increaseBy, cnt)
+		fi = prefix + "." + se.Sel.Name
+		cnt[normalizeID(pass, fi)] += increaseBy
+		return fi
+	}
+	// Handle selectors on function calls, e.g. `config().Enabled`.
+	if _, ok := se.X.(*ast.CallExpr); ok {
+		fi := pass.TypesInfo.Types[se.X].Type.String() + "." + se.Sel.Name
+		cnt[normalizeID(pass, fi)] += increaseBy
+		return fi
+	}
+	if ident, ok := se.X.(*ast.Ident); ok {
+		if pass.TypesInfo.Types[ident].Type != nil {
+			fi := pass.TypesInfo.Types[ident].Type.String() + "." + se.Sel.Name
+			cnt[normalizeID(pass, fi)] += increaseBy
+			return fi
+		}
+	}
+	return ""
+}
+
+// koanfFields returns a map of fields that have a koanf tag.
+func koanfFields(pass *analysis.Pass) map[string]token.Pos {
+	res := make(map[string]token.Pos)
+	for _, f := range pass.Files {
+		pkgName := f.Name.Name
+		ast.Inspect(f, func(node ast.Node) bool {
+			if ts, ok := node.(*ast.TypeSpec); ok {
+				st, ok := ts.Type.(*ast.StructType)
+				if !ok {
+					return true
+				}
+				for _, f := range st.Fields.List {
+					if tag := tagFromField(f); tag != "" {
+						t := strings.Join([]string{pkgName, ts.Name.Name, f.Names[0].Name}, ".")
+						res[t] = f.Pos()
+					}
+				}
+			}
+			return true
+		})
+	}
+	return res
+}
+
+func containsFlagSet(params []*ast.Field) bool {
+	for _, p := range params {
+		se, ok := p.Type.(*ast.StarExpr)
+		if !ok {
+			continue
+		}
+		sle, ok := se.X.(*ast.SelectorExpr)
+		if !ok {
+			continue
+		}
+		if sle.Sel.Name == "FlagSet" {
+			return true
+		}
+	}
+	return false
+}
+
+// checkFlagDefs checks flag definitions in the function.
+// The Result contains a list of errors where the flag name doesn't match the field name.
+func checkFlagDefs(pass *analysis.Pass, f *ast.FuncDecl, cnt map[string]int) Result {
+	// Ignore functions that do not take a flagset as a parameter.
+	if !containsFlagSet(f.Type.Params.List) {
+		return Result{}
+	}
+	var res Result
+	for _, s := range f.Body.List {
+		es, ok := s.(*ast.ExprStmt)
+		if !ok {
+			continue
+		}
+		callE, ok := es.X.(*ast.CallExpr)
+		if !ok {
+			continue
+		}
+		if len(callE.Args) != 3 {
+			continue
+		}
+		sl, ok := extractStrLit(callE.Args[0])
+		if !ok {
+			continue
+		}
+		s, ok := selectorName(callE.Args[1])
+		if !ok {
+			continue
+		}
+		handleSelector(pass, callE.Args[1].(*ast.SelectorExpr), -1, cnt)
+		if normSL := normalizeTag(sl); !strings.EqualFold(normSL, s) {
+			res.Errors = append(res.Errors, koanfError{
+				Pos:     f.Pos(),
+				Message: fmt.Sprintf("koanf tag name: %q doesn't match the field: %q", sl, s),
+				err:     errIncorrectFlag,
+			})
+		}
+
+	}
+	return res
+}
+
+func selectorName(e ast.Expr) (string, bool) {
+	n, ok := e.(ast.Node)
+	if !ok {
+		return "", false
+	}
+	se, ok := n.(*ast.SelectorExpr)
+	if !ok {
+		return "", false
+	}
+	return se.Sel.Name, true
+}
+
+// extractStrLit extracts the literal from an expression that is either:
+// - a string literal, or
+// - a sum of a variable and a string literal.
+// E.g.
+// - extractStrLit(`"max-size"`) = "max-size"
+// - extractStrLit(`prefix + ".enable"`) = "enable" (the leading dot is dropped).
+func extractStrLit(e ast.Expr) (string, bool) {
+	if s, ok := strLit(e); ok {
+		return s, true
+	}
+	if be, ok := e.(*ast.BinaryExpr); ok {
+		if be.Op == token.ADD {
+			if s, ok := strLit(be.Y); ok {
+				// Drop the prefix dot.
+				return s[1:], true
+			}
+		}
+	}
+	return "", false
+}
+
+func strLit(e ast.Expr) (string, bool) {
+	if s, ok := e.(*ast.BasicLit); ok {
+		if s.Kind == token.STRING {
+			return strings.Trim(s.Value, "\""), true
+		}
+	}
+	return "", false
+}
+
+// tagFromField extracts the koanf tag from a struct field.
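+// For example (illustrative), given a field declared as
+//
+//	Enable bool `koanf:"enable"`
+//
+// it returns "enable"; for a `koanf:"max-size"` tag it returns "maxsize",
+// since normalizeTag strips dashes before the comparison.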
+func tagFromField(f *ast.Field) string { + if f.Tag == nil { + return "" + } + tags, err := structtag.Parse(strings.Trim((f.Tag.Value), "`")) + if err != nil { + return "" + } + tag, err := tags.Get("koanf") + if err != nil { + return "" + } + return normalizeTag(tag.Name) +} + +// checkStruct returns violations where koanf tag name doesn't match field names. +func checkStruct(pass *analysis.Pass, s *ast.StructType) Result { + var res Result + for _, f := range s.Fields.List { + tag := tagFromField(f) + if tag == "" { + continue + } + fieldName := f.Names[0].Name + if !strings.EqualFold(tag, fieldName) { + res.Errors = append(res.Errors, koanfError{ + Pos: f.Pos(), + Message: fmt.Sprintf("field name: %q doesn't match tag name: %q\n", fieldName, tag), + err: errMismatch, + }) + } + } + return res +} + +func normalizeTag(s string) string { + return strings.ReplaceAll(s, "-", "") +} + +func normalizeID(pass *analysis.Pass, id string) string { + id = strings.TrimPrefix(id, "*") + return pass.Pkg.Name() + strings.TrimPrefix(id, pass.Pkg.Path()) +} diff --git a/linter/koanf/koanf.go b/linter/koanf/koanf.go new file mode 100644 index 0000000000..d6780760e7 --- /dev/null +++ b/linter/koanf/koanf.go @@ -0,0 +1,107 @@ +package main + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/singlechecker" +) + +var ( + errUnused = errors.New("unused") + errMismatch = errors.New("mismmatched field name and tag in a struct") + // e.g. f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + errIncorrectFlag = errors.New("mismatching flag initialization") +) + +func New(conf any) ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{Analyzer}, nil +} + +var Analyzer = &analysis.Analyzer{ + Name: "koanfcheck", + Doc: "check for koanf misconfigurations", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testkoanfcheck", + Doc: "check for koanf misconfigurations (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// koanfError indicates the position of an error in configuration. +type koanfError struct { + Pos token.Pos + Message string + err error +} + +// Result is returned from the checkStruct function, and holds all the +// configuration errors. +type Result struct { + Errors []koanfError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ( + ret Result + cnt = make(map[string]int) + // koanfFields map contains all the struct koanfFields that have koanf tag. + // It identifies field as "{pkgName}.{structName}.{field_Name}". + // e.g. "a.BatchPosterConfig.Enable", "a.BatchPosterConfig.MaxSize" + koanfFields = koanfFields(pass) + ) + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + var res Result + switch v := node.(type) { + case *ast.StructType: + res = checkStruct(pass, v) + case *ast.FuncDecl: + res = checkFlagDefs(pass, v, cnt) + case *ast.SelectorExpr: + handleSelector(pass, v, 1, cnt) + case *ast.IfStmt: + if se, ok := v.Cond.(*ast.SelectorExpr); ok { + handleSelector(pass, se, 1, cnt) + } + case *ast.CompositeLit: + handleComposite(pass, v, cnt) + default: + } + ret.Errors = append(ret.Errors, res.Errors...) 
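+			// Keep walking the AST; the accumulated violations are reported
+			// (or returned to the test harness in dry-run mode) after the walk.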
+ return true + }) + } + for k := range koanfFields { + if cnt[k] == 0 { + ret.Errors = append(ret.Errors, + koanfError{ + Pos: koanfFields[k], + Message: fmt.Sprintf("field %v not used", k), + err: errUnused, + }) + } + } + for _, err := range ret.Errors { + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: err.Pos, + Message: err.Message, + Category: "koanf", + }) + } + } + return ret, nil +} + +func main() { + singlechecker.Main(Analyzer) +} diff --git a/linter/koanf/koanf_test.go b/linter/koanf/koanf_test.go new file mode 100644 index 0000000000..064ae533c4 --- /dev/null +++ b/linter/koanf/koanf_test.go @@ -0,0 +1,71 @@ +package main + +import ( + "errors" + "os" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "golang.org/x/tools/go/analysis/analysistest" +) + +var ( + incorrectFlag = "incorrect_flag" + mismatch = "mismatch" + unused = "unused" +) + +func testData(t *testing.T) string { + t.Helper() + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get wd: %s", err) + } + return filepath.Join(filepath.Dir(wd), "testdata") +} + +// Tests koanf/a package that contains two types of errors where: +// - koanf tag doesn't match field name. +// - flag definition doesn't match field name. +// Errors are marked as comments in the package source file. +func TestMismatch(t *testing.T) { + testdata := testData(t) + got := errCounts(analysistest.Run(t, testdata, analyzerForTests, "koanf/a")) + want := map[string]int{ + incorrectFlag: 2, + mismatch: 1, + } + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("analysistest.Run() unexpected diff:\n%s\n", diff) + } +} + +func TestUnused(t *testing.T) { + testdata := testData(t) + got := errCounts(analysistest.Run(t, testdata, analyzerForTests, "koanf/b")) + if diff := cmp.Diff(got, map[string]int{"unused": 2}); diff != "" { + t.Errorf("analysistest.Run() unexpected diff:\n%s\n", diff) + } +} + +func errCounts(res []*analysistest.Result) map[string]int { + m := make(map[string]int) + for _, r := range res { + if rs, ok := r.Result.(Result); ok { + for _, e := range rs.Errors { + var s string + switch { + case errors.Is(e.err, errIncorrectFlag): + s = incorrectFlag + case errors.Is(e.err, errMismatch): + s = mismatch + case errors.Is(e.err, errUnused): + s = unused + } + m[s] = m[s] + 1 + } + } + } + return m +} diff --git a/linter/pointercheck/pointer.go b/linter/pointercheck/pointer.go new file mode 100644 index 0000000000..6500b01222 --- /dev/null +++ b/linter/pointercheck/pointer.go @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/singlechecker" +) + +func New(conf any) ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{Analyzer}, nil +} + +var Analyzer = &analysis.Analyzer{ + Name: "pointercheck", + Doc: "check for pointer comparison", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "testpointercheck", + Doc: "check for pointer comparison (for tests)", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +// pointerCmpError indicates the position of pointer comparison. +type pointerCmpError struct { + Pos token.Position + Message string +} + +// Result is returned from the checkStruct function, and holds all the +// configuration errors. 
+type Result struct { + Errors []pointerCmpError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ret Result + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + var res *Result + switch e := node.(type) { + case *ast.BinaryExpr: + res = checkExpr(pass, e) + default: + } + if res == nil { + return true + } + for _, err := range res.Errors { + ret.Errors = append(ret.Errors, err) + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: err.Message, + Category: "pointercheck", + }) + } + } + return true + }, + ) + } + return ret, nil +} + +func checkExpr(pass *analysis.Pass, e *ast.BinaryExpr) *Result { + if e.Op != token.EQL && e.Op != token.NEQ { + return nil + } + ret := &Result{} + if ptrIdent(pass, e.X) && ptrIdent(pass, e.Y) { + ret.Errors = append(ret.Errors, pointerCmpError{ + Pos: pass.Fset.Position(e.Pos()), + Message: fmt.Sprintf("comparison of two pointers in expression %v", e), + }) + } + return ret +} + +func ptrIdent(pass *analysis.Pass, e ast.Expr) bool { + switch tp := e.(type) { + case *ast.Ident, *ast.SelectorExpr: + et := pass.TypesInfo.Types[tp].Type + _, isPtr := (et).(*types.Pointer) + return isPtr + } + return false +} + +func main() { + singlechecker.Main(Analyzer) +} diff --git a/linter/pointercheck/pointer_test.go b/linter/pointercheck/pointer_test.go new file mode 100644 index 0000000000..290e3826de --- /dev/null +++ b/linter/pointercheck/pointer_test.go @@ -0,0 +1,31 @@ +package main + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +func TestAll(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get wd: %s", err) + } + testdata := filepath.Join(filepath.Dir(wd), "testdata") + res := analysistest.Run(t, testdata, analyzerForTests, "pointercheck") + if cnt := countErrors(res); cnt != 6 { + t.Errorf("analysistest.Run() got %v errors, expected 6", cnt) + } +} + +func countErrors(errs []*analysistest.Result) int { + cnt := 0 + for _, e := range errs { + if r, ok := e.Result.(Result); ok { + cnt += len(r.Errors) + } + } + return cnt +} diff --git a/linter/structinit/structinit.go b/linter/structinit/structinit.go new file mode 100644 index 0000000000..e4e65bc3fc --- /dev/null +++ b/linter/structinit/structinit.go @@ -0,0 +1,122 @@ +package main + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/singlechecker" +) + +// Tip for linter that struct that has this comment should be included in the +// analysis. +// Note: comment should be directly line above the struct definition. +const linterTip = "// lint:require-exhaustive-initialization" + +func New(conf any) ([]*analysis.Analyzer, error) { + return []*analysis.Analyzer{Analyzer}, nil +} + +// Analyzer implements struct analyzer for structs that are annotated with +// `linterTip`, it checks that every instantiation initializes all the fields. 
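+//
+// For example, given
+//
+//	// lint:require-exhaustive-initialization
+//	type Point struct{ X, Y int }
+//
+// the literal Point{X: 1} is flagged because Y is left out, while
+// Point{X: 1, Y: 2} passes. (Point is a hypothetical type, used here
+// only for illustration.)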
+var Analyzer = &analysis.Analyzer{ + Name: "structinit", + Doc: "check for struct field initializations", + Run: func(p *analysis.Pass) (interface{}, error) { return run(false, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +var analyzerForTests = &analysis.Analyzer{ + Name: "teststructinit", + Doc: "check for struct field initializations", + Run: func(p *analysis.Pass) (interface{}, error) { return run(true, p) }, + ResultType: reflect.TypeOf(Result{}), +} + +type structError struct { + Pos token.Pos + Message string +} + +type Result struct { + Errors []structError +} + +func run(dryRun bool, pass *analysis.Pass) (interface{}, error) { + var ( + ret Result + structs = markedStructs(pass) + ) + for _, f := range pass.Files { + ast.Inspect(f, func(node ast.Node) bool { + // For every composite literal check that number of elements in + // the literal match the number of struct fields. + if cl, ok := node.(*ast.CompositeLit); ok { + stName := pass.TypesInfo.Types[cl].Type.String() + if cnt, found := structs[stName]; found && cnt != len(cl.Elts) { + ret.Errors = append(ret.Errors, structError{ + Pos: cl.Pos(), + Message: fmt.Sprintf("struct: %q initialized with: %v of total: %v fields", stName, len(cl.Elts), cnt), + }) + + } + + } + return true + }) + } + for _, err := range ret.Errors { + if !dryRun { + pass.Report(analysis.Diagnostic{ + Pos: err.Pos, + Message: err.Message, + Category: "structinit", + }) + } + } + return ret, nil +} + +// markedStructs returns a map of structs that are annotated for linter to check +// that all fields are initialized when the struct is instantiated. +// It maps struct full name (including package path) to number of fields it contains. +func markedStructs(pass *analysis.Pass) map[string]int { + res := make(map[string]int) + for _, f := range pass.Files { + tips := make(map[position]bool) + ast.Inspect(f, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.Comment: + p := pass.Fset.Position(node.Pos()) + if strings.Contains(n.Text, linterTip) { + tips[position{p.Filename, p.Line + 1}] = true + } + case *ast.TypeSpec: + if st, ok := n.Type.(*ast.StructType); ok { + p := pass.Fset.Position(st.Struct) + if tips[position{p.Filename, p.Line}] { + fieldsCnt := 0 + for _, field := range st.Fields.List { + fieldsCnt += len(field.Names) + } + res[pass.Pkg.Path()+"."+n.Name.Name] = fieldsCnt + } + } + } + return true + }) + } + return res +} + +type position struct { + fileName string + line int +} + +func main() { + singlechecker.Main(Analyzer) +} diff --git a/linter/structinit/structinit_test.go b/linter/structinit/structinit_test.go new file mode 100644 index 0000000000..db3676e185 --- /dev/null +++ b/linter/structinit/structinit_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +func testData(t *testing.T) string { + t.Helper() + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get wd: %s", err) + } + return filepath.Join(filepath.Dir(wd), "testdata") +} + +func TestLinter(t *testing.T) { + testdata := testData(t) + got := errCount(analysistest.Run(t, testdata, analyzerForTests, "structinit/a")) + if got != 2 { + t.Errorf("analysistest.Run() got %d errors, expected 2", got) + } +} + +func errCount(res []*analysistest.Result) int { + cnt := 0 + for _, r := range res { + if rs, ok := r.Result.(Result); ok { + cnt += len(rs.Errors) + } + } + return cnt +} diff --git a/linter/testdata/src/koanf/a/a.go b/linter/testdata/src/koanf/a/a.go new file 
mode 100644
index 0000000000..a0513fb09b
--- /dev/null
+++ b/linter/testdata/src/koanf/a/a.go
@@ -0,0 +1,58 @@
+package a
+
+import (
+	"flag"
+)
+
+type Config struct {
+	L2       int `koanf:"chain"` // Err: mismatch.
+	LogLevel int `koanf:"log-level"`
+	LogType  int `koanf:"log-type"`
+	Metrics  int `koanf:"metrics"`
+	PProf    int `koanf:"pprof"`
+	Node     int `koanf:"node"`
+	Queue    int `koanf:"queue"`
+}
+
+// Cover use of all fields in various ways:
+
+// Instantiating a type.
+var defaultConfig = Config{
+	L2:       1,
+	LogLevel: 2,
+}
+
+// Instantiating a type and taking a reference.
+var defaultConfigPtr = &Config{
+	LogType: 3,
+	Metrics: 4,
+}
+
+func init() {
+	defaultConfig.PProf = 5
+	defaultConfig.Node, _ = 6, 0
+	defaultConfigPtr.Queue = 7
+}
+
+type BatchPosterConfig struct {
+	Enable  bool `koanf:"enable"`
+	MaxSize int  `koanf:"max-size" reload:"hot"`
+}
+
+var DefaultBatchPosterConfig BatchPosterConfig
+
+func BatchPosterConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enabled", DefaultBatchPosterConfig.Enable, "") // Err: incorrect flag.
+	f.Int("max-sz", DefaultBatchPosterConfig.MaxSize, "")          // Err: incorrect flag.
+}
+
+func ConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
+	f.Int("max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
+}
+
+func init() {
+	// Fields must be used outside flag definitions at least once.
+	DefaultBatchPosterConfig.Enable = true
+	DefaultBatchPosterConfig.MaxSize = 3
+}
diff --git a/linter/testdata/src/koanf/b/b.go b/linter/testdata/src/koanf/b/b.go
new file mode 100644
index 0000000000..fe958f17b3
--- /dev/null
+++ b/linter/testdata/src/koanf/b/b.go
@@ -0,0 +1,52 @@
+package b
+
+import (
+	"flag"
+	"fmt"
+)
+
+type ParCfg struct {
+	child      ChildCfg      `koanf:"child"`
+	grandChild GrandChildCfg `koanf:grandchild`
+}
+
+var defaultCfg = ParCfg{}
+
+type ChildCfg struct {
+	A bool `koanf:"A"`
+	B bool `koanf:"B"`
+	C bool `koanf:"C"`
+	D bool `koanf:"D"` // Error: not used outside flag definition.
+}
+
+var defaultChildCfg = ChildCfg{}
+
+func childConfigAddOptions(prefix string, f *flag.FlagSet) {
+	f.Bool(prefix+".a", defaultChildCfg.A, "")
+	f.Bool("b", defaultChildCfg.B, "")
+	f.Bool("c", defaultChildCfg.C, "")
+	f.Bool("d", defaultChildCfg.D, "")
+}
+
+type GrandChildCfg struct {
+	A int `koanf:"A"` // Error: unused.
+}
+
+func (c *GrandChildCfg) Do() {
+}
+
+func configPtr() *ChildCfg {
+	return nil
+}
+func config() ChildCfg {
+	return ChildCfg{}
+}
+
+func init() {
+	fmt.Printf("%v %v", config().A, configPtr().B)
+	// This covers usage of both `ParCfg.Child` and `ChildCfg.C`.
+	_ = defaultCfg.child.C
+	// Covers usage of grandChild.
+	defaultCfg.grandChild.Do()
+
+}
diff --git a/linter/testdata/src/pointercheck/pointercheck.go b/linter/testdata/src/pointercheck/pointercheck.go
new file mode 100644
index 0000000000..f63fdd1743
--- /dev/null
+++ b/linter/testdata/src/pointercheck/pointercheck.go
@@ -0,0 +1,50 @@
+package pointercheck
+
+import "fmt"
+
+type A struct {
+	x, y int
+}
+
+// pointerCmp compares pointers, sometimes inside nested expressions.
+func pointerCmp() {
+	a, b := &A{}, &A{}
+	// Simple comparisons.
+	if a != b {
+		fmt.Println("Not Equal")
+	}
+	if a == b {
+		fmt.Println("Equals")
+	}
+	// Nested binary expressions.
+	if (2 > 1) && (a != b) {
+		fmt.Println("Still not equal")
+	}
+	if (174%15 > 3) && (2 > 1 && (1+2 > 2 || a != b)) {
+		fmt.Println("Who knows at this point")
+	}
+	// Nested and inside unary operator.
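+	// The a == b below is still flagged: ast.Inspect descends into the
+	// operands of the unary and binary expressions.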
+	if 10 > 5 && !(2 > 1 || a == b) {
+		fmt.Println("Not equal")
+	}
+	c, d := 1, 2
+	if &c != &d {
+		fmt.Println("Not equal")
+	}
+}
+
+func legitCmps() {
+	a, b := &A{}, &A{}
+	if a.x == b.x {
+		fmt.Println("Allowed")
+	}
+}
+
+type cache struct {
+	dirty *A
+}
+
+// matches does pointer comparison.
+func (c *cache) matches(a *A) bool {
+	return c.dirty == a
+}
diff --git a/linter/testdata/src/structinit/a/a.go b/linter/testdata/src/structinit/a/a.go
new file mode 100644
index 0000000000..45f6059726
--- /dev/null
+++ b/linter/testdata/src/structinit/a/a.go
@@ -0,0 +1,33 @@
+package a
+
+import "fmt"
+
+// lint:require-exhaustive-initialization
+type interestingStruct struct {
+	x int
+	b *boringStruct
+}
+
+type boringStruct struct {
+	x, y int
+}
+
+func init() {
+	a := &interestingStruct{ // Error: only a single field is initialized.
+		x: 1,
+	}
+	fmt.Println(a)
+	b := interestingStruct{ // Error: only a single field is initialized.
+		b: nil,
+	}
+	fmt.Println(b)
+	c := interestingStruct{ // Not an error, all fields are initialized.
+		x: 1,
+		b: nil,
+	}
+	fmt.Println(c)
+	d := &boringStruct{ // Not an error since it's not annotated for the linter.
+		x: 1,
+	}
+	fmt.Println(d)
+}
diff --git a/nitro-testnode b/nitro-testnode
index 14f24a1bad..7ad12c0f1b 160000
--- a/nitro-testnode
+++ b/nitro-testnode
@@ -1 +1 @@
-Subproject commit 14f24a1bad2625412602d06156156c380bd589d2
+Subproject commit 7ad12c0f1be75a72c7360d5258e0090f8225594e
diff --git a/nodeInterface/NodeInterface.go b/nodeInterface/NodeInterface.go
index a363458663..f13f8ce6c0 100644
--- a/nodeInterface/NodeInterface.go
+++ b/nodeInterface/NodeInterface.go
@@ -590,3 +590,80 @@ func (n NodeInterface) LegacyLookupMessageBatchProof(c ctx, evm mech, batchNum h
 	calldataForL1 = data
 	return
 }
+
+// BlockL1Num fetches the L1 block number of a given L2 block number.
+// The c ctx and evm mech arguments are not used, but are supplied to match the precompile function type in the NodeInterface contract.
+func (n NodeInterface) BlockL1Num(c ctx, evm mech, l2BlockNum uint64) (uint64, error) {
+	blockHeader, err := n.backend.HeaderByNumber(n.context, rpc.BlockNumber(l2BlockNum))
+	if err != nil {
+		return 0, err
+	}
+	if blockHeader == nil {
+		return 0, fmt.Errorf("nil header for l2 block: %d", l2BlockNum)
+	}
+	blockL1Num := types.DeserializeHeaderExtraInformation(blockHeader).L1BlockNumber
+	return blockL1Num, nil
+}
+
+func (n NodeInterface) matchL2BlockNumWithL1(c ctx, evm mech, l2BlockNum uint64, l1BlockNum uint64) error {
+	blockL1Num, err := n.BlockL1Num(c, evm, l2BlockNum)
+	if err != nil {
+		return fmt.Errorf("failed to get the L1 block number of the L2 block: %v. Error: %w", l2BlockNum, err)
+	}
+	if blockL1Num != l1BlockNum {
+		return fmt.Errorf("no L2 block was found with the given L1 block number.
Found L2 block: %v with L1 block number: %v, given L1 block number: %v", l2BlockNum, blockL1Num, l1BlockNum) + } + return nil +} + +// L2BlockRangeForL1 finds the first and last L2 block numbers that have the given L1 block number +func (n NodeInterface) L2BlockRangeForL1(c ctx, evm mech, l1BlockNum uint64) (uint64, uint64, error) { + currentBlockNum := n.backend.CurrentBlock().Number.Uint64() + genesis := n.backend.ChainConfig().ArbitrumChainParams.GenesisBlockNum + + storedMids := map[uint64]uint64{} + firstL2BlockForL1 := func(target uint64) (uint64, error) { + low, high := genesis, currentBlockNum + highBlockL1Num, err := n.BlockL1Num(c, evm, high) + if err != nil { + return 0, err + } + if highBlockL1Num < target { + return high + 1, nil + } + for low < high { + mid := arbmath.SaturatingUAdd(low, high) / 2 + if _, ok := storedMids[mid]; !ok { + midBlockL1Num, err := n.BlockL1Num(c, evm, mid) + if err != nil { + return 0, err + } + storedMids[mid] = midBlockL1Num + } + if storedMids[mid] < target { + low = mid + 1 + } else { + high = mid + } + } + return high, nil + } + + firstBlock, err := firstL2BlockForL1(l1BlockNum) + if err != nil { + return 0, 0, fmt.Errorf("failed to get the first L2 block with the L1 block: %v. Error: %w", l1BlockNum, err) + } + lastBlock, err := firstL2BlockForL1(l1BlockNum + 1) + if err != nil { + return 0, 0, fmt.Errorf("failed to get the last L2 block with the L1 block: %v. Error: %w", l1BlockNum, err) + } + + if err := n.matchL2BlockNumWithL1(c, evm, firstBlock, l1BlockNum); err != nil { + return 0, 0, err + } + lastBlock -= 1 + if err = n.matchL2BlockNumWithL1(c, evm, lastBlock, l1BlockNum); err != nil { + return 0, 0, err + } + return firstBlock, lastBlock, nil +} diff --git a/nodeInterface/virtual-contracts.go b/nodeInterface/virtual-contracts.go index 29ca3f2b82..ee81c1c3e6 100644 --- a/nodeInterface/virtual-contracts.go +++ b/nodeInterface/virtual-contracts.go @@ -53,6 +53,7 @@ func init() { statedb *state.StateDB, header *types.Header, backend core.NodeInterfaceBackendAPI, + blockCtx *vm.BlockContext, ) (*core.Message, *ExecutionResult, error) { to := msg.To arbosVersion := arbosState.ArbOSVersion(statedb) // check ArbOS has been installed @@ -87,10 +88,7 @@ func init() { return msg, nil, nil } - evm, vmError, err := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}) - if err != nil { - return msg, nil, err - } + evm, vmError := backend.GetEVM(ctx, msg, statedb, header, &vm.Config{NoBaseFee: true}, blockCtx) go func() { <-ctx.Done() evm.Cancel() diff --git a/relay/relay.go b/relay/relay.go index f4fc33d9e3..bb07251190 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -52,7 +52,7 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { clients, err := broadcastclients.NewBroadcastClients( func() *broadcastclient.Config { return &config.Node.Feed.Input }, - config.L2.ChainId, + config.Chain.ID, 0, &q, confirmedSequenceNumberListener, @@ -70,7 +70,7 @@ func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { return nil, errors.New("relay attempted to sign feed message") } return &Relay{ - broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.L2.ChainId, feedErrChan, dataSignerErr), + broadcaster: broadcaster.NewBroadcaster(func() *wsbroadcastserver.BroadcasterConfig { return &config.Node.Feed.Output }, config.Chain.ID, feedErrChan, dataSignerErr), broadcastClients: clients, confirmedSequenceNumberChan: 
confirmedSequenceNumberListener, messageChan: q.queue, @@ -141,7 +141,7 @@ func (r *Relay) StopAndWait() { type Config struct { Conf genericconf.ConfConfig `koanf:"conf"` - L2 L2Config `koanf:"chain"` + Chain L2Config `koanf:"chain"` LogLevel int `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` @@ -154,7 +154,7 @@ type Config struct { var ConfigDefault = Config{ Conf: genericconf.ConfConfigDefault, - L2: L2ConfigDefault, + Chain: L2ConfigDefault, LogLevel: int(log.LvlInfo), LogType: "plaintext", Metrics: false, @@ -175,7 +175,7 @@ func ConfigAddOptions(f *flag.FlagSet) { f.Bool("pprof", ConfigDefault.PProf, "enable pprof") genericconf.PProfAddOptions("pprof-cfg", f) NodeConfigAddOptions("node", f) - f.Int("queue", ConfigDefault.Queue, "size of relay queue") + f.Int("queue", ConfigDefault.Queue, "queue for incoming messages from sequencer") } type NodeConfig struct { @@ -191,15 +191,15 @@ func NodeConfigAddOptions(prefix string, f *flag.FlagSet) { } type L2Config struct { - ChainId uint64 `koanf:"id"` + ID uint64 `koanf:"id"` } var L2ConfigDefault = L2Config{ - ChainId: 0, + ID: 0, } func L2ConfigAddOptions(prefix string, f *flag.FlagSet) { - f.Uint64(prefix+".id", L2ConfigDefault.ChainId, "L2 chain ID") + f.Uint64(prefix+".id", L2ConfigDefault.ID, "L2 chain ID") } func ParseRelay(_ context.Context, args []string) (*Config, error) { diff --git a/staker/block_validator.go b/staker/block_validator.go index 109f9d82b2..94bc2a0806 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -19,8 +19,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rlp" - "github.com/offchainlabs/nitro/arbnode/dataposter" - "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/containers" "github.com/offchainlabs/nitro/util/rpcclient" @@ -81,17 +79,13 @@ type BlockValidator struct { type BlockValidatorConfig struct { Enable bool `koanf:"enable"` ValidationServer rpcclient.ClientConfig `koanf:"validation-server" reload:"hot"` - ValidationPoll time.Duration `koanf:"check-validations-poll" reload:"hot"` + ValidationPoll time.Duration `koanf:"validation-poll" reload:"hot"` PrerecordedBlocks uint64 `koanf:"prerecorded-blocks" reload:"hot"` ForwardBlocks uint64 `koanf:"forward-blocks" reload:"hot"` CurrentModuleRoot string `koanf:"current-module-root"` // TODO(magic) requires reinitialization on hot reload PendingUpgradeModuleRoot string `koanf:"pending-upgrade-module-root"` // TODO(magic) requires StatelessBlockValidator recreation on hot reload FailureIsFatal bool `koanf:"failure-is-fatal" reload:"hot"` Dangerous BlockValidatorDangerousConfig `koanf:"dangerous"` - DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` - RedisUrl string `koanf:"redis-url"` - RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` - ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` } func (c *BlockValidatorConfig) Validate() error { @@ -107,17 +101,13 @@ type BlockValidatorConfigFetcher func() *BlockValidatorConfig func BlockValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultBlockValidatorConfig.Enable, "enable block-by-block validation") rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultBlockValidatorConfig.ValidationServer) - f.Duration(prefix+".check-validations-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") + 
f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations") f.Uint64(prefix+".forward-blocks", DefaultBlockValidatorConfig.ForwardBlocks, "prepare entries for up to that many blocks ahead of validation (small footprint)") f.Uint64(prefix+".prerecorded-blocks", DefaultBlockValidatorConfig.PrerecordedBlocks, "record that many blocks ahead of validation (larger footprint)") f.String(prefix+".current-module-root", DefaultBlockValidatorConfig.CurrentModuleRoot, "current wasm module root ('current' read from chain, 'latest' from machines/latest dir, or provide hash)") f.String(prefix+".pending-upgrade-module-root", DefaultBlockValidatorConfig.PendingUpgradeModuleRoot, "pending upgrade wasm module root to additionally validate (hash, 'latest' or empty)") f.Bool(prefix+".failure-is-fatal", DefaultBlockValidatorConfig.FailureIsFatal, "failing a validation is treated as a fatal error") - f.Uint64(prefix+".extra-gas", DefaultBlockValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") BlockValidatorDangerousConfigAddOptions(prefix+".dangerous", f) - dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) - f.String(prefix+".redis-url", DefaultBlockValidatorConfig.RedisUrl, "redis url for block validator") - redislock.AddConfigOptions(prefix+".redis-lock", f) } func BlockValidatorDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { @@ -134,10 +124,6 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, - DataPoster: dataposter.DefaultDataPosterConfig, - RedisUrl: "", - RedisLock: redislock.DefaultCfg, - ExtraGas: 50000, } var TestBlockValidatorConfig = BlockValidatorConfig{ @@ -150,10 +136,6 @@ var TestBlockValidatorConfig = BlockValidatorConfig{ PendingUpgradeModuleRoot: "latest", FailureIsFatal: true, Dangerous: DefaultBlockValidatorDangerousConfig, - DataPoster: dataposter.TestDataPosterConfig, - RedisUrl: "", - RedisLock: redislock.DefaultCfg, - ExtraGas: 50000, } var DefaultBlockValidatorDangerousConfig = BlockValidatorDangerousConfig{ @@ -615,7 +597,7 @@ func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Dura var batchMsgs arbutil.MessageIndex var printedCount int64 if validated.GlobalState.Batch > 0 { - batchMsgs, err = v.inboxTracker.GetBatchMessageCount(validated.GlobalState.Batch) + batchMsgs, err = v.inboxTracker.GetBatchMessageCount(validated.GlobalState.Batch - 1) } if err != nil { printedCount = -1 diff --git a/staker/eoa_validator_wallet.go b/staker/eoa_validator_wallet.go index f514969434..5285e96ea9 100644 --- a/staker/eoa_validator_wallet.go +++ b/staker/eoa_validator_wallet.go @@ -6,40 +6,36 @@ package staker import ( "context" "fmt" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" - "github.com/offchainlabs/nitro/util/stopwaiter" ) type EoaValidatorWallet struct { - stopwaiter.StopWaiter auth *bind.TransactOpts client arbutil.L1Interface rollupAddress common.Address challengeManager *challengegen.ChallengeManager challengeManagerAddress common.Address dataPoster *dataposter.DataPoster 
- txCount atomic.Uint64 + getExtraGas func() uint64 } var _ ValidatorWalletInterface = (*EoaValidatorWallet)(nil) -func NewEoaValidatorWallet(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, auth *bind.TransactOpts) (*EoaValidatorWallet, error) { +func NewEoaValidatorWallet(dataPoster *dataposter.DataPoster, rollupAddress common.Address, l1Client arbutil.L1Interface, auth *bind.TransactOpts, getExtraGas func() uint64) (*EoaValidatorWallet, error) { return &EoaValidatorWallet{ auth: auth, client: l1Client, rollupAddress: rollupAddress, dataPoster: dataPoster, - txCount: atomic.Uint64{}, + getExtraGas: getExtraGas, }, nil } @@ -86,52 +82,25 @@ func (w *EoaValidatorWallet) TestTransactions(context.Context, []*types.Transact return nil } -// Polls until the nonce from dataposter catches up with transactions posted -// by validator wallet. -func (w *EoaValidatorWallet) pollForNonce(ctx context.Context) (uint64, error) { - var nonce uint64 - flag := true - for flag { - var err error - select { - // TODO: consider adding config for eoa validator wallet and pull this - // polling time from there. - case <-time.After(100 * time.Millisecond): - nonce, _, err = w.dataPoster.GetNextNonceAndMeta(ctx) - if err != nil { - return 0, fmt.Errorf("get next nonce and meta: %w", err) - } - if nonce >= w.txCount.Load() { - flag = false - break - } - log.Warn("Dataposter nonce too low", "nonce", nonce, "validator tx count", w.txCount.Load()) - case <-ctx.Done(): - return 0, ctx.Err() - } - } - return nonce, nil -} - func (w *EoaValidatorWallet) ExecuteTransactions(ctx context.Context, builder *ValidatorTxBuilder, _ common.Address) (*types.Transaction, error) { if len(builder.transactions) == 0 { return nil, nil } - nonce, err := w.pollForNonce(ctx) + tx := builder.transactions[0] // we ignore future txs and only execute the first + return w.postTransaction(ctx, tx) +} + +func (w *EoaValidatorWallet) postTransaction(ctx context.Context, baseTx *types.Transaction) (*types.Transaction, error) { + nonce, err := w.L1Client().NonceAt(ctx, w.auth.From, nil) if err != nil { - return nil, fmt.Errorf("polling for dataposter nonce to catch up: %w", err) + return nil, err } - if nonce > w.txCount.Load() { - // If this happens, it probably means the dataposter is used by another client, besides validator. 
- log.Warn("Precondition failure, dataposter nonce is higher than validator transactio count", "dataposter nonce", nonce, "validator tx count", w.txCount.Load()) - } - tx := builder.transactions[0] // we ignore future txs and only execute the first - trans, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *tx.To(), tx.Data(), tx.Gas(), tx.Value()) + gas := baseTx.Gas() + w.getExtraGas() + newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } - w.txCount.Store(nonce) - return trans, nil + return newTx, nil } func (w *EoaValidatorWallet) TimeoutChallenges(ctx context.Context, timeouts []uint64) (*types.Transaction, error) { @@ -140,7 +109,12 @@ func (w *EoaValidatorWallet) TimeoutChallenges(ctx context.Context, timeouts []u } auth := *w.auth auth.Context = ctx - return w.challengeManager.Timeout(&auth, timeouts[0]) + auth.NoSend = true + tx, err := w.challengeManager.Timeout(&auth, timeouts[0]) + if err != nil { + return nil, err + } + return w.postTransaction(ctx, tx) } func (w *EoaValidatorWallet) CanBatchTxs() bool { @@ -153,10 +127,12 @@ func (w *EoaValidatorWallet) AuthIfEoa() *bind.TransactOpts { func (w *EoaValidatorWallet) Start(ctx context.Context) { w.dataPoster.Start(ctx) - w.StopWaiter.Start(ctx, w) } func (b *EoaValidatorWallet) StopAndWait() { - b.StopWaiter.StopAndWait() b.dataPoster.StopAndWait() } + +func (b *EoaValidatorWallet) DataPoster() *dataposter.DataPoster { + return b.dataPoster +} diff --git a/staker/staker.go b/staker/staker.go index 1fe1b83fcf..1b6538b161 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -20,6 +20,8 @@ import ( "github.com/ethereum/go-ethereum/rpc" flag "github.com/spf13/pflag" + "github.com/offchainlabs/nitro/arbnode/dataposter" + "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/util/arbmath" @@ -69,20 +71,24 @@ func L1PostingStrategyAddOptions(prefix string, f *flag.FlagSet) { } type L1ValidatorConfig struct { - Enable bool `koanf:"enable"` - Strategy string `koanf:"strategy"` - StakerInterval time.Duration `koanf:"staker-interval"` - MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` - L1PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` - DisableChallenge bool `koanf:"disable-challenge"` - ConfirmationBlocks int64 `koanf:"confirmation-blocks"` - UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` - OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` - StartFromStaked bool `koanf:"start-validation-from-staked"` - ContractWalletAddress string `koanf:"contract-wallet-address"` - GasRefunderAddress string `koanf:"gas-refunder-address"` - Dangerous DangerousConfig `koanf:"dangerous"` - L1Wallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` + Enable bool `koanf:"enable"` + Strategy string `koanf:"strategy"` + StakerInterval time.Duration `koanf:"staker-interval"` + MakeAssertionInterval time.Duration `koanf:"make-assertion-interval"` + PostingStrategy L1PostingStrategy `koanf:"posting-strategy"` + DisableChallenge bool `koanf:"disable-challenge"` + ConfirmationBlocks int64 `koanf:"confirmation-blocks"` + UseSmartContractWallet bool `koanf:"use-smart-contract-wallet"` + OnlyCreateWalletContract bool `koanf:"only-create-wallet-contract"` + StartValidationFromStaked bool `koanf:"start-validation-from-staked"` + 
ContractWalletAddress string `koanf:"contract-wallet-address"` + GasRefunderAddress string `koanf:"gas-refunder-address"` + DataPoster dataposter.DataPosterConfig `koanf:"data-poster" reload:"hot"` + RedisUrl string `koanf:"redis-url"` + RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` + ExtraGas uint64 `koanf:"extra-gas" reload:"hot"` + Dangerous DangerousConfig `koanf:"dangerous"` + ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` strategy StakerStrategy gasRefunder common.Address @@ -132,25 +138,50 @@ func (c *L1ValidatorConfig) Validate() error { } var DefaultL1ValidatorConfig = L1ValidatorConfig{ - Enable: true, - Strategy: "Watchtower", - StakerInterval: time.Minute, - MakeAssertionInterval: time.Hour, - L1PostingStrategy: L1PostingStrategy{}, - DisableChallenge: false, - ConfirmationBlocks: 12, - UseSmartContractWallet: false, - OnlyCreateWalletContract: false, - StartFromStaked: true, - ContractWalletAddress: "", - GasRefunderAddress: "", - Dangerous: DefaultDangerousConfig, - L1Wallet: DefaultValidatorL1WalletConfig, + Enable: true, + Strategy: "Watchtower", + StakerInterval: time.Minute, + MakeAssertionInterval: time.Hour, + PostingStrategy: L1PostingStrategy{}, + DisableChallenge: false, + ConfirmationBlocks: 12, + UseSmartContractWallet: false, + OnlyCreateWalletContract: false, + StartValidationFromStaked: true, + ContractWalletAddress: "", + GasRefunderAddress: "", + DataPoster: dataposter.DefaultDataPosterConfigForValidator, + RedisUrl: "", + RedisLock: redislock.DefaultCfg, + ExtraGas: 50000, + Dangerous: DefaultDangerousConfig, + ParentChainWallet: DefaultValidatorL1WalletConfig, +} + +var TestL1ValidatorConfig = L1ValidatorConfig{ + Enable: true, + Strategy: "Watchtower", + StakerInterval: time.Millisecond * 10, + MakeAssertionInterval: 0, + PostingStrategy: L1PostingStrategy{}, + DisableChallenge: false, + ConfirmationBlocks: 0, + UseSmartContractWallet: false, + OnlyCreateWalletContract: false, + StartValidationFromStaked: true, + ContractWalletAddress: "", + GasRefunderAddress: "", + DataPoster: dataposter.TestDataPosterConfigForValidator, + RedisUrl: "", + RedisLock: redislock.DefaultCfg, + ExtraGas: 50000, + Dangerous: DefaultDangerousConfig, + ParentChainWallet: DefaultValidatorL1WalletConfig, } var DefaultValidatorL1WalletConfig = genericconf.WalletConfig{ Pathname: "validator-wallet", - PasswordImpl: genericconf.WalletConfigDefault.PasswordImpl, + Password: genericconf.WalletConfigDefault.Password, PrivateKey: genericconf.WalletConfigDefault.PrivateKey, Account: genericconf.WalletConfigDefault.Account, OnlyCreateKey: genericconf.WalletConfigDefault.OnlyCreateKey, @@ -166,11 +197,15 @@ func L1ValidatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int64(prefix+".confirmation-blocks", DefaultL1ValidatorConfig.ConfirmationBlocks, "confirmation blocks") f.Bool(prefix+".use-smart-contract-wallet", DefaultL1ValidatorConfig.UseSmartContractWallet, "use a smart contract wallet instead of an EOA address") f.Bool(prefix+".only-create-wallet-contract", DefaultL1ValidatorConfig.OnlyCreateWalletContract, "only create smart wallet contract and exit") - f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartFromStaked, "assume staked nodes are valid") + f.Bool(prefix+".start-validation-from-staked", DefaultL1ValidatorConfig.StartValidationFromStaked, "assume staked nodes are valid") f.String(prefix+".contract-wallet-address", DefaultL1ValidatorConfig.ContractWalletAddress, "validator smart contract wallet public 
address") f.String(prefix+".gas-refunder-address", DefaultL1ValidatorConfig.GasRefunderAddress, "The gas refunder contract address (optional)") + f.String(prefix+".redis-url", DefaultL1ValidatorConfig.RedisUrl, "redis url for L1 validator") + f.Uint64(prefix+".extra-gas", DefaultL1ValidatorConfig.ExtraGas, "use this much more gas than estimation says is necessary to post transactions") + dataposter.DataPosterConfigAddOptions(prefix+".data-poster", f) + redislock.AddConfigOptions(prefix+".redis-lock", f) DangerousConfigAddOptions(prefix+".dangerous", f) - genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.L1Wallet.Pathname) + genericconf.WalletConfigAddOptions(prefix+".parent-chain-wallet", f, DefaultL1ValidatorConfig.ParentChainWallet.Pathname) } type DangerousConfig struct { @@ -242,7 +277,7 @@ func NewStaker( return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config.StartFromStaked && blockValidator != nil { + if config.StartValidationFromStaked && blockValidator != nil { stakedNotifiers = append(stakedNotifiers, blockValidator) } return &Staker{ @@ -252,7 +287,7 @@ func NewStaker( confirmedNotifiers: confirmedNotifiers, baseCallOpts: callOpts, config: config, - highGasBlocksBuffer: big.NewInt(config.L1PostingStrategy.HighGasDelayBlocks), + highGasBlocksBuffer: big.NewInt(config.PostingStrategy.HighGasDelayBlocks), lastActCalledBlock: nil, inboxReader: statelessBlockValidator.inboxReader, statelessBlockValidator: statelessBlockValidator, @@ -269,7 +304,7 @@ func (s *Staker) Initialize(ctx context.Context) error { if walletAddressOrZero != (common.Address{}) { s.updateStakerBalanceMetric(ctx) } - if s.blockValidator != nil && s.config.StartFromStaked { + if s.blockValidator != nil && s.config.StartValidationFromStaked { latestStaked, _, err := s.validatorUtils.LatestStaked(&s.baseCallOpts, s.rollupAddress, walletAddressOrZero) if err != nil { return err @@ -337,11 +372,15 @@ func (s *Staker) getLatestStakedState(ctx context.Context, staker common.Address func (s *Staker) StopAndWait() { s.StopWaiter.StopAndWait() - s.wallet.StopAndWait() + if s.Strategy() != WatchtowerStrategy { + s.wallet.StopAndWait() + } } func (s *Staker) Start(ctxIn context.Context) { - s.wallet.Start(ctxIn) + if s.Strategy() != WatchtowerStrategy { + s.wallet.Start(ctxIn) + } s.StopWaiter.Start(ctxIn, s) backoff := time.Second s.CallIteratively(func(ctx context.Context) (returningWait time.Duration) { @@ -450,7 +489,7 @@ func (s *Staker) shouldAct(ctx context.Context) bool { log.Warn("error getting gas price", "err", err) } else { gasPriceFloat = float64(gasPrice.Int64()) / 1e9 - if gasPriceFloat >= s.config.L1PostingStrategy.HighGasThreshold { + if gasPriceFloat >= s.config.PostingStrategy.HighGasThreshold { gasPriceHigh = true } } @@ -475,14 +514,14 @@ func (s *Staker) shouldAct(ctx context.Context) bool { // Clamp `s.highGasBlocksBuffer` to between 0 and HighGasDelayBlocks if s.highGasBlocksBuffer.Sign() < 0 { s.highGasBlocksBuffer.SetInt64(0) - } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.L1PostingStrategy.HighGasDelayBlocks)) > 0 { - s.highGasBlocksBuffer.SetInt64(s.config.L1PostingStrategy.HighGasDelayBlocks) + } else if s.highGasBlocksBuffer.Cmp(big.NewInt(s.config.PostingStrategy.HighGasDelayBlocks)) > 0 { + s.highGasBlocksBuffer.SetInt64(s.config.PostingStrategy.HighGasDelayBlocks) } if gasPriceHigh && s.highGasBlocksBuffer.Sign() > 0 { log.Warn( "not acting yet as gas price is high", "gasPrice", gasPriceFloat, - 
"highGasPriceConfig", s.config.L1PostingStrategy.HighGasThreshold, + "highGasPriceConfig", s.config.PostingStrategy.HighGasThreshold, "highGasBuffer", s.highGasBlocksBuffer, ) return false @@ -490,8 +529,34 @@ func (s *Staker) shouldAct(ctx context.Context) bool { return true } +func (s *Staker) confirmDataPosterIsReady(ctx context.Context) error { + dp := s.wallet.DataPoster() + if dp == nil { + return nil + } + dataPosterNonce, _, err := dp.GetNextNonceAndMeta(ctx) + if err != nil { + return err + } + latestNonce, err := s.l1Reader.Client().NonceAt(ctx, dp.Sender(), nil) + if err != nil { + return err + } + if dataPosterNonce > latestNonce { + return fmt.Errorf("data poster nonce %v is ahead of on-chain nonce %v -- probably waiting for a pending transaction to be included in a block", dataPosterNonce, latestNonce) + } + if dataPosterNonce < latestNonce { + return fmt.Errorf("data poster nonce %v is behind on-chain nonce %v -- is something else making transactions on this address?", dataPosterNonce, latestNonce) + } + return nil +} + func (s *Staker) Act(ctx context.Context) (*types.Transaction, error) { if s.config.strategy != WatchtowerStrategy { + err := s.confirmDataPosterIsReady(ctx) + if err != nil { + return nil, err + } whitelisted, err := s.IsWhitelisted(ctx) if err != nil { return nil, fmt.Errorf("error checking if whitelisted: %w", err) diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 7fe913d921..e290ffad67 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "regexp" "sync" "testing" @@ -453,8 +454,9 @@ func (v *StatelessBlockValidator) Start(ctx_in context.Context) error { } v.pendingWasmModuleRoot = latest } else { + valid, _ := regexp.MatchString("(0x)?[0-9a-fA-F]{64}", v.config.PendingUpgradeModuleRoot) v.pendingWasmModuleRoot = common.HexToHash(v.config.PendingUpgradeModuleRoot) - if (v.pendingWasmModuleRoot == common.Hash{}) { + if (!valid || v.pendingWasmModuleRoot == common.Hash{}) { return errors.New("pending-upgrade-module-root config value illegal") } } diff --git a/staker/validator_wallet.go b/staker/validator_wallet.go index 6c940d8550..fb0f5ed956 100644 --- a/staker/validator_wallet.go +++ b/staker/validator_wallet.go @@ -24,7 +24,6 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/headerreader" - "github.com/offchainlabs/nitro/util/stopwaiter" ) var validatorABI abi.ABI @@ -61,10 +60,11 @@ type ValidatorWalletInterface interface { AuthIfEoa() *bind.TransactOpts Start(context.Context) StopAndWait() + // May be nil + DataPoster() *dataposter.DataPoster } type ContractValidatorWallet struct { - stopwaiter.StopWaiter con *rollupgen.ValidatorWallet address atomic.Pointer[common.Address] onWalletCreated func(common.Address) @@ -76,13 +76,13 @@ type ContractValidatorWallet struct { rollupAddress common.Address challengeManagerAddress common.Address dataPoster *dataposter.DataPoster - extraGas uint64 + getExtraGas func() uint64 } var _ ValidatorWalletInterface = (*ContractValidatorWallet)(nil) func NewContractValidatorWallet(dp *dataposter.DataPoster, address *common.Address, walletFactoryAddr, rollupAddress common.Address, l1Reader *headerreader.HeaderReader, auth *bind.TransactOpts, rollupFromBlock int64, onWalletCreated func(common.Address), - extraGas uint64) (*ContractValidatorWallet, error) { + getExtraGas func() uint64) 
(*ContractValidatorWallet, error) { var con *rollupgen.ValidatorWallet if address != nil { var err error @@ -105,7 +105,7 @@ func NewContractValidatorWallet(dp *dataposter.DataPoster, address *common.Addre rollup: rollup, rollupFromBlock: rollupFromBlock, dataPoster: dp, - extraGas: extraGas, + getExtraGas: getExtraGas, } // Go complains if we make an address variable before wallet and copy it in wallet.address.Store(address) @@ -344,7 +344,7 @@ func (v *ContractValidatorWallet) estimateGas(ctx context.Context, value *big.In if err != nil { return 0, fmt.Errorf("estimating gas: %w", err) } - return g + v.extraGas, nil + return g + v.getExtraGas(), nil } func (v *ContractValidatorWallet) TimeoutChallenges(ctx context.Context, challenges []uint64) (*types.Transaction, error) { @@ -411,11 +411,15 @@ func (v *ContractValidatorWallet) AuthIfEoa() *bind.TransactOpts { } func (w *ContractValidatorWallet) Start(ctx context.Context) { - w.StopWaiter.Start(ctx, w) + w.dataPoster.Start(ctx) } func (b *ContractValidatorWallet) StopAndWait() { - b.StopWaiter.StopAndWait() + b.dataPoster.StopAndWait() +} + +func (b *ContractValidatorWallet) DataPoster() *dataposter.DataPoster { + return b.dataPoster } func GetValidatorWalletContract( diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 6f6c041c41..11bf92608b 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -76,7 +76,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { Require(t, err) seqTxOpts := l1info.GetDefaultTransactOpts("Sequencer", ctx) conf.BatchPoster.Enable = true - conf.BatchPoster.MaxBatchSize = len(firstTxData) * 2 + conf.BatchPoster.MaxSize = len(firstTxData) * 2 startL1Block, err := l1client.BlockNumber(ctx) Require(t, err) for i := 0; i < parallelBatchPosters; i++ { @@ -175,7 +175,7 @@ func TestBatchPosterKeepsUp(t *testing.T) { conf := arbnode.ConfigDefaultL1Test() conf.BatchPoster.CompressionLevel = brotli.BestCompression - conf.BatchPoster.MaxBatchPostDelay = time.Hour + conf.BatchPoster.MaxDelay = time.Hour conf.RPC.RPCTxFeeCap = 1000. 
l2info, nodeA, l2clientA, _, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, conf, nil, nil) defer requireClose(t, l1stack) diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 7fe1a65969..fa3d902b18 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -46,7 +46,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops var delayEvery int if workloadLoops > 1 { - l1NodeConfigA.BatchPoster.MaxBatchPostDelay = time.Millisecond * 500 + l1NodeConfigA.BatchPoster.MaxDelay = time.Millisecond * 500 delayEvery = workloadLoops / 3 } @@ -59,7 +59,7 @@ func testBlockValidatorSimple(t *testing.T, dasModeString string, workloadLoops validatorConfig := arbnode.ConfigDefaultL1NonSequencerTest() validatorConfig.BlockValidator.Enable = true validatorConfig.DataAvailability = l1NodeConfigA.DataAvailability - validatorConfig.DataAvailability.AggregatorConfig.Enable = false + validatorConfig.DataAvailability.RPCAggregator.Enable = false AddDefaultValNode(t, ctx, validatorConfig, !arbitrator) l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, validatorConfig, nil) defer nodeB.StopAndWait() diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 83d0202648..9fd002bd94 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -433,7 +433,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no }}) Require(t, stack.Start()) - Require(t, l1backend.StartMining(1)) + Require(t, l1backend.StartMining()) rpcClient, err := stack.Attach() Require(t, err) @@ -475,13 +475,19 @@ func DeployOnTestL1( Require(t, err) serializedChainConfig, err := json.Marshal(chainConfig) Require(t, err) + + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) + Require(t, err) + l1Reader.Start(ctx) + defer l1Reader.StopAndWait() + addresses, err := arbnode.DeployOnL1( ctx, - l1client, + l1Reader, &l1TransactionOpts, l1info.GetAddress("Sequencer"), 0, - func() *headerreader.Config { return &headerreader.TestConfig }, arbnode.GenerateRollupConfig(false, locator.LatestWasmModuleRoot(), l1info.GetAddress("RollupOwner"), chainConfig, serializedChainConfig, common.Address{}), ) Require(t, err) @@ -832,19 +838,19 @@ func setupConfigWithDAS( dasConfig := &das.DataAvailabilityConfig{ Enable: enableDas, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: dbPath, }, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: enableFileStorage, DataDir: dbPath, }, - LocalDBStorageConfig: das.LocalDBStorageConfig{ + LocalDBStorage: das.LocalDBStorageConfig{ Enable: enableDbStorage, DataDir: dbPath, }, RequestTimeout: 5 * time.Second, - L1NodeURL: "none", + ParentChainNodeURL: "none", SequencerInboxAddress: "none", PanicOnError: true, DisableSignatureChecking: true, @@ -873,12 +879,12 @@ func setupConfigWithDAS( PubKeyBase64Encoded: blsPubToBase64(dasSignerKey), SignerMask: 1, } - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, beConfigA) + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) l1NodeConfigA.DataAvailability.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - 
l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" } return chainConfig, l1NodeConfigA, lifecycleManager, dbPath, dasSignerKey diff --git a/system_tests/conditionaltx_test.go b/system_tests/conditionaltx_test.go index c65103694a..14aa000313 100644 --- a/system_tests/conditionaltx_test.go +++ b/system_tests/conditionaltx_test.go @@ -16,7 +16,7 @@ import ( "github.com/ethereum/go-ethereum/arbitrum" "github.com/ethereum/go-ethereum/arbitrum_types" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rpc" @@ -103,23 +103,23 @@ func getOptions(address common.Address, rootHash common.Hash, slotValueMap map[c } func getFulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions { - future := hexutil.Uint64(timestamp + 30) - past := hexutil.Uint64(timestamp - 1) - futureBlockNumber := hexutil.Uint64(blockNumber + 1000) - currentBlockNumber := hexutil.Uint64(blockNumber) + future := math.HexOrDecimal64(timestamp + 30) + past := math.HexOrDecimal64(timestamp - 1) + futureBlockNumber := math.HexOrDecimal64(blockNumber + 1000) + currentBlockNumber := math.HexOrDecimal64(blockNumber) return getBlockTimeLimits(t, currentBlockNumber, futureBlockNumber, past, future) } func getUnfulfillableBlockTimeLimits(t *testing.T, blockNumber uint64, timestamp uint64) []*arbitrum_types.ConditionalOptions { - future := hexutil.Uint64(timestamp + 30) - past := hexutil.Uint64(timestamp - 1) - futureBlockNumber := hexutil.Uint64(blockNumber + 1000) - previousBlockNumber := hexutil.Uint64(blockNumber - 1) + future := math.HexOrDecimal64(timestamp + 30) + past := math.HexOrDecimal64(timestamp - 1) + futureBlockNumber := math.HexOrDecimal64(blockNumber + 1000) + previousBlockNumber := math.HexOrDecimal64(blockNumber - 1) // skip first empty options return getBlockTimeLimits(t, futureBlockNumber, previousBlockNumber, future, past)[1:] } -func getBlockTimeLimits(t *testing.T, blockMin, blockMax hexutil.Uint64, timeMin, timeMax hexutil.Uint64) []*arbitrum_types.ConditionalOptions { +func getBlockTimeLimits(t *testing.T, blockMin, blockMax math.HexOrDecimal64, timeMin, timeMax math.HexOrDecimal64) []*arbitrum_types.ConditionalOptions { basic := []*arbitrum_types.ConditionalOptions{ {}, {TimestampMin: &timeMin}, @@ -157,9 +157,9 @@ func optionsProduct(optionsA, optionsB []*arbitrum_types.ConditionalOptions) []* c.KnownAccounts[k] = v } limitTriples := []struct { - a *hexutil.Uint64 - b *hexutil.Uint64 - c **hexutil.Uint64 + a *math.HexOrDecimal64 + b *math.HexOrDecimal64 + c **math.HexOrDecimal64 }{ {a.BlockNumberMin, b.BlockNumberMin, &c.BlockNumberMin}, {a.BlockNumberMax, b.BlockNumberMax, &c.BlockNumberMax}, @@ -168,10 +168,10 @@ func optionsProduct(optionsA, optionsB []*arbitrum_types.ConditionalOptions) []* } for _, tripple := range limitTriples { if tripple.b != nil { 
- value := hexutil.Uint64(*tripple.b) + value := math.HexOrDecimal64(*tripple.b) *tripple.c = &value } else if tripple.a != nil { - value := hexutil.Uint64(*tripple.a) + value := math.HexOrDecimal64(*tripple.a) *tripple.c = &value } else { *tripple.c = nil diff --git a/system_tests/das_test.go b/system_tests/das_test.go index a597ecf618..8889d2d53d 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -28,6 +28,7 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/solgen/go/bridgegen" + "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" ) @@ -45,15 +46,15 @@ func startLocalDASServer( config := das.DataAvailabilityConfig{ Enable: true, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: keyDir, }, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: dataDir, }, - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } var syncFromStorageServices []*das.IterableStorageService @@ -64,7 +65,7 @@ func startLocalDASServer( Require(t, err) seqInboxCaller, err := bridgegen.NewSequencerInboxCaller(seqInboxAddress, l1client) Require(t, err) - privKey, err := config.KeyConfig.BLSPrivKey() + privKey, err := config.Key.BLSPrivKey() Require(t, err) daWriter, err := das.NewSignAfterStoreDASWriterWithSeqInboxCaller(privKey, seqInboxCaller, storageService, "") Require(t, err) @@ -132,11 +133,11 @@ func TestDASRekey(t *testing.T) { // Setup DAS config l1NodeConfigA.DataAvailability.Enable = true - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, backendConfigA) - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{restServerUrlA} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigA) + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) Require(t, err) @@ -145,11 +146,11 @@ func TestDASRekey(t *testing.T) { l1NodeConfigB.BlockValidator.Enable = false l1NodeConfigB.DataAvailability.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{restServerUrlA} + l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigB.DataAvailability.RestAggregator.Enable = true + l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{restServerUrlA} - l1NodeConfigB.DataAvailability.L1NodeURL = "none" + l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" l2clientB, nodeB := 
Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) @@ -179,7 +180,7 @@ func TestDASRekey(t *testing.T) { l2blockchain, err := execution.GetBlockChain(l2chainDb, nil, chainConfig, arbnode.ConfigDefaultL2Test().TxLookupLimit) Require(t, err) - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, backendConfigB) + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, backendConfigB) nodeA, err := arbnode.CreateNode(ctx, l2stackA, l2chainDb, l2arbDb, NewFetcherFromConfig(l1NodeConfigA), l2blockchain, l1client, addresses, sequencerTxOptsPtr, sequencerTxOptsPtr, nil, feedErrChan) Require(t, err) Require(t, nodeA.Start(ctx)) @@ -233,7 +234,8 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { chainConfig := params.ArbitrumDevTestDASChainConfig() l1info, l1client, _, l1stack := createTestL1BlockChain(t, nil) defer requireClose(t, l1stack) - l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }) + arbSys, _ := precompilesgen.NewArbSys(types.ArbSysAddress, l1client) + l1Reader, err := headerreader.New(ctx, l1client, func() *headerreader.Config { return &headerreader.TestConfig }, arbSys) Require(t, err) l1Reader.Start(ctx) defer l1Reader.StopAndWait() @@ -247,18 +249,18 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { serverConfig := das.DataAvailabilityConfig{ Enable: true, - LocalCacheConfig: das.TestBigCacheConfig, + LocalCache: das.TestBigCacheConfig, - LocalFileStorageConfig: das.LocalFileStorageConfig{ + LocalFileStorage: das.LocalFileStorageConfig{ Enable: true, DataDir: fileDataDir, }, - LocalDBStorageConfig: das.LocalDBStorageConfig{ + LocalDBStorage: das.LocalDBStorageConfig{ Enable: true, DataDir: dbDataDir, }, - KeyConfig: das.KeyConfig{ + Key: das.KeyConfig{ KeyDir: keyDir, }, @@ -293,11 +295,11 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { PubKeyBase64Encoded: blsPubToBase64(pubkey), SignerMask: 1, } - l1NodeConfigA.DataAvailability.AggregatorConfig = aggConfigForBackend(t, beConfigA) - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Enable = true - l1NodeConfigA.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigA.DataAvailability.L1NodeURL = "none" + l1NodeConfigA.DataAvailability.RPCAggregator = aggConfigForBackend(t, beConfigA) + l1NodeConfigA.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigA.DataAvailability.RestAggregator.Enable = true + l1NodeConfigA.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigA.DataAvailability.ParentChainNodeURL = "none" dataSigner := signature.DataSignerFromPrivateKey(l1info.Accounts["Sequencer"].PrivateKey) @@ -321,16 +323,16 @@ func TestDASComplexConfigAndRestMirror(t *testing.T) { // AggregatorConfig set up below - L1NodeURL: "none", - RequestTimeout: 5 * time.Second, + ParentChainNodeURL: "none", + RequestTimeout: 5 * time.Second, } l1NodeConfigB.BlockValidator.Enable = false l1NodeConfigB.DataAvailability.Enable = true - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig = das.DefaultRestfulClientAggregatorConfig - l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Enable = true - 
l1NodeConfigB.DataAvailability.RestfulClientAggregatorConfig.Urls = []string{"http://" + restLis.Addr().String()} - l1NodeConfigB.DataAvailability.L1NodeURL = "none" + l1NodeConfigB.DataAvailability.RestAggregator = das.DefaultRestfulClientAggregatorConfig + l1NodeConfigB.DataAvailability.RestAggregator.Enable = true + l1NodeConfigB.DataAvailability.RestAggregator.Urls = []string{"http://" + restLis.Addr().String()} + l1NodeConfigB.DataAvailability.ParentChainNodeURL = "none" l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, l1NodeConfigB, nil) checkBatchPosting(t, ctx, l1client, l2clientA, l1info, l2info, big.NewInt(1e12), l2clientB) diff --git a/system_tests/forwarder_test.go b/system_tests/forwarder_test.go index 0e5cca319a..0a954719d8 100644 --- a/system_tests/forwarder_test.go +++ b/system_tests/forwarder_test.go @@ -48,7 +48,7 @@ func TestStaticForwarder(t *testing.T) { nodeConfigB.Sequencer.Enable = false nodeConfigB.DelayedSequencer.Enable = false nodeConfigB.Forwarder.RedisUrl = "" - nodeConfigB.ForwardingTargetImpl = ipcPath + nodeConfigB.ForwardingTarget = ipcPath nodeConfigB.BatchPoster.Enable = false clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, nodeConfigB, nil) @@ -104,7 +104,7 @@ func fallbackSequencer( nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.SeqCoordinator.Enable = opts.enableSecCoordinator nodeConfig.SeqCoordinator.RedisUrl = opts.redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = opts.ipcPath + nodeConfig.SeqCoordinator.MyUrl = opts.ipcPath return createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, nil, stackConfig) } @@ -127,8 +127,9 @@ func createForwardingNode( nodeConfig := arbnode.ConfigDefaultL1Test() nodeConfig.Sequencer.Enable = false nodeConfig.DelayedSequencer.Enable = false + nodeConfig.BatchPoster.Enable = false nodeConfig.Forwarder.RedisUrl = redisUrl - nodeConfig.ForwardingTargetImpl = fallbackPath + nodeConfig.ForwardingTarget = fallbackPath // nodeConfig.Feed.Output.Enable = false return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) @@ -148,10 +149,10 @@ func createSequencer( ipcConfig.Path = ipcPath ipcConfig.Apply(stackConfig) nodeConfig := arbnode.ConfigDefaultL1Test() - nodeConfig.BatchPoster.Enable = true + nodeConfig.BatchPoster.Enable = false nodeConfig.SeqCoordinator.Enable = true nodeConfig.SeqCoordinator.RedisUrl = redisUrl - nodeConfig.SeqCoordinator.MyUrlImpl = ipcPath + nodeConfig.SeqCoordinator.MyUrl = ipcPath return Create2ndNodeWithConfig(t, ctx, first, l1stack, l1info, l2InitData, nodeConfig, stackConfig) } diff --git a/system_tests/nodeinterface_test.go b/system_tests/nodeinterface_test.go new file mode 100644 index 0000000000..167f2204cd --- /dev/null +++ b/system_tests/nodeinterface_test.go @@ -0,0 +1,75 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package arbtest
+
+import (
+	"context"
+	"math/big"
+	"testing"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/offchainlabs/nitro/arbos/util"
+	"github.com/offchainlabs/nitro/solgen/go/node_interfacegen"
+)
+
+func TestL2BlockRangeForL1(t *testing.T) {
+	t.Parallel()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	l2info, node, l2client, l1info, _, _, l1stack := createTestNodeOnL1(t, ctx, true)
+	defer requireClose(t, l1stack)
+	defer node.StopAndWait()
+	user := l1info.GetDefaultTransactOpts("User", ctx)
+
+	numTransactions := 200
+	for i := 0; i < numTransactions; i++ {
+		TransferBalanceTo(t, "Owner", util.RemapL1Address(user.From), big.NewInt(1e18), l2info, l2client, ctx)
+	}
+
+	nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, l2client)
+	if err != nil {
+		t.Fatalf("Error creating node interface: %v", err)
+	}
+
+	l1BlockNums := map[uint64]*[2]uint64{}
+	latestL2, err := l2client.BlockNumber(ctx)
+	if err != nil {
+		t.Fatalf("Error querying most recent l2 block: %v", err)
+	}
+	for l2BlockNum := uint64(0); l2BlockNum <= latestL2; l2BlockNum++ {
+		l1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, l2BlockNum)
+		if err != nil {
+			t.Fatalf("Error querying l1 block number for l2 block: %d, error: %v", l2BlockNum, err)
+		}
+		if _, ok := l1BlockNums[l1BlockNum]; !ok {
+			l1BlockNums[l1BlockNum] = &[2]uint64{l2BlockNum, l2BlockNum}
+		}
+		l1BlockNums[l1BlockNum][1] = l2BlockNum
+	}
+
+	// Test success.
+	for l1BlockNum := range l1BlockNums {
+		rng, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, l1BlockNum)
+		if err != nil {
+			t.Fatalf("Error getting l2 block range for l1 block: %d, error: %v", l1BlockNum, err)
+		}
+		expected := l1BlockNums[l1BlockNum]
+		if rng.FirstBlock != expected[0] || rng.LastBlock != expected[1] {
+			unexpectedL1BlockNum, err := nodeInterface.BlockL1Num(&bind.CallOpts{}, rng.LastBlock)
+			if err != nil {
+				t.Fatalf("Error querying l1 block number for l2 block: %d, error: %v", rng.LastBlock, err)
+			}
+			// Handle the edge case when new l2 blocks are produced between when latestL2 was last calculated and now.
+			if unexpectedL1BlockNum != l1BlockNum || rng.LastBlock < expected[1] || rng.FirstBlock != expected[0] {
+				t.Errorf("L2BlockRangeForL1(%d) = (%d %d) want (%d %d)", l1BlockNum, rng.FirstBlock, rng.LastBlock, expected[0], expected[1])
+			}
+		}
+	}
+	// Test invalid case.
+ if _, err := nodeInterface.L2BlockRangeForL1(&bind.CallOpts{}, 1e5); err == nil { + t.Fatalf("GetL2BlockRangeForL1 didn't fail for an invalid input") + } +} diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go index 29bfdd6e6f..b1dd32d1dc 100644 --- a/system_tests/retryable_test.go +++ b/system_tests/retryable_test.go @@ -319,7 +319,7 @@ func TestSubmissionGasCosts(t *testing.T) { usefulGas := params.TxGas excessGasLimit := uint64(808) - maxSubmissionFee := big.NewInt(1e13) + maxSubmissionFee := big.NewInt(1e14) retryableGas := arbmath.UintToBig(usefulGas + excessGasLimit) // will only burn the intrinsic cost retryableL2CallValue := big.NewInt(1e4) retryableCallData := []byte{} @@ -358,8 +358,10 @@ func TestSubmissionGasCosts(t *testing.T) { if redeemReceipt.Status != types.ReceiptStatusSuccessful { Fatal(t, "first retry tx failed") } + redeemBlock, err := l2client.HeaderByNumber(ctx, redeemReceipt.BlockNumber) + Require(t, err) - l2BaseFee := GetBaseFee(t, l2client, ctx) + l2BaseFee := redeemBlock.BaseFee excessGasPrice := arbmath.BigSub(gasFeeCap, l2BaseFee) excessWei := arbmath.BigMulByUint(l2BaseFee, excessGasLimit) excessWei.Add(excessWei, arbmath.BigMul(excessGasPrice, retryableGas)) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 2209e82d93..b1f50c9436 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -61,7 +61,7 @@ func TestRedisSeqCoordinatorPriorities(t *testing.T) { initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) createStartNode := func(nodeNum int) { - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[nodeNum] + nodeConfig.SeqCoordinator.MyUrl = nodeNames[nodeNum] _, node, _ := CreateTestL2WithConfig(t, ctx, l2Info, nodeConfig, false) nodes[nodeNum] = node } @@ -277,7 +277,7 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { initRedisForTest(t, ctx, nodeConfig.SeqCoordinator.RedisUrl, nodeNames) - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[0] + nodeConfig.SeqCoordinator.MyUrl = nodeNames[0] l2Info, nodeA, clientA, l1info, _, _, l1stack := createTestNodeOnL1WithConfig(t, ctx, true, nodeConfig, params.ArbitrumDevTestChainConfig(), nil) defer requireClose(t, l1stack) defer nodeA.StopAndWait() @@ -302,10 +302,10 @@ func testCoordinatorMessageSync(t *testing.T, successCase bool) { nodeConfigDup := *nodeConfig nodeConfig = &nodeConfigDup - nodeConfig.SeqCoordinator.MyUrlImpl = nodeNames[1] + nodeConfig.SeqCoordinator.MyUrl = nodeNames[1] if !successCase { - nodeConfig.SeqCoordinator.Signing.ECDSA.AcceptSequencer = false - nodeConfig.SeqCoordinator.Signing.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} + nodeConfig.SeqCoordinator.Signer.ECDSA.AcceptSequencer = false + nodeConfig.SeqCoordinator.Signer.ECDSA.AllowedAddresses = []string{l2Info.GetAddress("User2").Hex()} } clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2Info.ArbInitData, nodeConfig, nil) defer nodeB.StopAndWait() diff --git a/system_tests/seqfeed_test.go b/system_tests/seqfeed_test.go index 0ae72e384f..23c0e44c02 100644 --- a/system_tests/seqfeed_test.go +++ b/system_tests/seqfeed_test.go @@ -27,9 +27,9 @@ func newBroadcasterConfigTest() *wsbroadcastserver.BroadcasterConfig { func newBroadcastClientConfigTest(port int) *broadcastclient.Config { return &broadcastclient.Config{ - URLs: []string{fmt.Sprintf("ws://localhost:%d/feed", port)}, + URL: []string{fmt.Sprintf("ws://localhost:%d/feed", port)}, Timeout: 200 
* time.Millisecond, - Verifier: signature.VerifierConfig{ + Verify: signature.VerifierConfig{ Dangerous: signature.DangerousVerifierConfig{ AcceptMissing: true, }, @@ -89,7 +89,7 @@ func TestRelayedSequencerFeed(t *testing.T) { port := nodeA.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port config.Node.Feed.Input = *newBroadcastClientConfigTest(port) config.Node.Feed.Output = *newBroadcasterConfigTest() - config.L2.ChainId = bigChainId.Uint64() + config.Chain.ID = bigChainId.Uint64() feedErrChan := make(chan error, 10) currentRelay, err := relay.NewRelay(&config, feedErrChan) @@ -145,7 +145,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigC := arbnode.ConfigDefaultL1Test() nodeConfigC.BatchPoster.Enable = false nodeConfigC.DataAvailability = nodeConfigA.DataAvailability - nodeConfigC.DataAvailability.AggregatorConfig.Enable = false + nodeConfigC.DataAvailability.RPCAggregator.Enable = false nodeConfigC.Feed.Output = *newBroadcasterConfigTest() l2clientC, nodeC := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigC, nil) defer nodeC.StopAndWait() @@ -157,7 +157,7 @@ func testLyingSequencer(t *testing.T, dasModeStr string) { nodeConfigB.Feed.Output.Enable = false nodeConfigB.Feed.Input = *newBroadcastClientConfigTest(port) nodeConfigB.DataAvailability = nodeConfigA.DataAvailability - nodeConfigB.DataAvailability.AggregatorConfig.Enable = false + nodeConfigB.DataAvailability.RPCAggregator.Enable = false l2clientB, nodeB := Create2ndNodeWithConfig(t, ctx, nodeA, l1stack, l1info, &l2infoA.ArbInitData, nodeConfigB, nil) defer nodeB.StopAndWait() diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 468463d58f..96ea1ee2e7 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -128,13 +128,13 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, l1client) Require(t, err) - valConfig := staker.L1ValidatorConfig{} + valConfig := staker.TestL1ValidatorConfig - dpA, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + dpA, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletA, err := staker.NewContractValidatorWallet(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, 10000) + valWalletA, err := staker.NewContractValidatorWallet(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfig.ExtraGas }) Require(t, err) if honestStakerInactive { valConfig.Strategy = "Defensive" @@ -178,11 +178,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) } Require(t, err) - dpB, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeB.L1Reader, &l1authB, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + dpB, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeB.L1Reader, &l1authB, 
NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletB, err := staker.NewEoaValidatorWallet(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), &l1authB) + valWalletB, err := staker.NewEoaValidatorWallet(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), &l1authB, func() uint64 { return 0 }) Require(t, err) valConfig.Strategy = "MakeNodes" statelessB, err := staker.NewStatelessBlockValidator( @@ -217,11 +217,11 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) err = valWalletB.Initialize(ctx) Require(t, err) } - dpC, err := arbnode.ValidatorDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.BlockValidatorPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) + dpC, err := arbnode.StakerDataposter(rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), l2nodeA.L1Reader, &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), nil) if err != nil { t.Fatalf("Error creating validator dataposter: %v", err) } - valWalletC, err := staker.NewContractValidatorWallet(dpC, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, nil, 0, func(common.Address) {}, 10000) + valWalletC, err := staker.NewContractValidatorWallet(dpC, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, nil, 0, func(common.Address) {}, func() uint64 { return 10000 }) Require(t, err) valConfig.Strategy = "Watchtower" stakerC, err := staker.NewStaker( diff --git a/system_tests/twonodes_test.go b/system_tests/twonodes_test.go index 165b01b35a..72de2aa50a 100644 --- a/system_tests/twonodes_test.go +++ b/system_tests/twonodes_test.go @@ -26,7 +26,7 @@ func testTwoNodesSimple(t *testing.T, dasModeStr string) { authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability - l1NodeConfigBDataAvailability.AggregatorConfig.Enable = false + l1NodeConfigBDataAvailability.RPCAggregator.Enable = false l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) defer nodeB.StopAndWait() diff --git a/system_tests/twonodeslong_test.go b/system_tests/twonodeslong_test.go index 3987e5cf7b..0cac9d6442 100644 --- a/system_tests/twonodeslong_test.go +++ b/system_tests/twonodeslong_test.go @@ -48,7 +48,7 @@ func testTwoNodesLong(t *testing.T, dasModeStr string) { authorizeDASKeyset(t, ctx, dasSignerKey, l1info, l1client) l1NodeConfigBDataAvailability := l1NodeConfigA.DataAvailability - l1NodeConfigBDataAvailability.AggregatorConfig.Enable = false + l1NodeConfigBDataAvailability.RPCAggregator.Enable = false l2clientB, nodeB := Create2ndNode(t, ctx, nodeA, l1stack, l1info, &l2info.ArbInitData, &l1NodeConfigBDataAvailability) defer nodeB.StopAndWait() diff --git a/system_tests/validation_mock_test.go b/system_tests/validation_mock_test.go index da6958b028..b6ebd02478 100644 --- a/system_tests/validation_mock_test.go +++ b/system_tests/validation_mock_test.go @@ -330,7 +330,7 @@ func TestExecutionKeepAlive(t *testing.T) { defer cancel() _, validationDefault := createMockValidationNode(t, ctx, nil) shortTimeoutConfig := server_arb.DefaultArbitratorSpawnerConfig - shortTimeoutConfig.ExecRunTimeout = time.Second + shortTimeoutConfig.ExecutionRunTimeout = time.Second _, validationShortTO := createMockValidationNode(t, ctx, &shortTimeoutConfig) 
configFetcher := StaticFetcherFrom(t, &rpcclient.TestClientConfig) diff --git a/util/arbmath/math.go b/util/arbmath/math.go index a9758db1c0..467ee58a14 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -175,6 +175,11 @@ func BigAddByUint(augend *big.Int, addend uint64) *big.Int { return new(big.Int).Add(augend, UintToBig(addend)) } +// BigSub subtracts a uint from a huge +func BigSubByUint(minuend *big.Int, subtrahend uint64) *big.Int { + return new(big.Int).Sub(minuend, UintToBig(subtrahend)) +} + // BigMulByFrac multiply a huge by a rational func BigMulByFrac(value *big.Int, numerator, denominator int64) *big.Int { value = new(big.Int).Set(value) diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index 28fef8ee07..befd54ace3 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -18,17 +18,20 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/solgen/go/precompilesgen" "github.com/offchainlabs/nitro/util/stopwaiter" flag "github.com/spf13/pflag" ) +type ArbSysInterface interface { + ArbBlockNumber(*bind.CallOpts) (*big.Int, error) +} + type HeaderReader struct { stopwaiter.StopWaiter config ConfigFetcher client arbutil.L1Interface isParentChainArbitrum bool - arbSys *precompilesgen.ArbSys + arbSys ArbSysInterface chanMutex sync.RWMutex // All fields below require the chanMutex @@ -91,19 +94,18 @@ var TestConfig = Config{ UseFinalityData: false, } -func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher) (*HeaderReader, error) { +func New(ctx context.Context, client arbutil.L1Interface, config ConfigFetcher, arbSysPrecompile ArbSysInterface) (*HeaderReader, error) { isParentChainArbitrum := false - var arbSys *precompilesgen.ArbSys - codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) - if err != nil { - return nil, err - } - if len(codeAt) != 0 { - isParentChainArbitrum = true - arbSys, err = precompilesgen.NewArbSys(types.ArbSysAddress, client) + var arbSys ArbSysInterface + if arbSysPrecompile != nil { + codeAt, err := client.CodeAt(ctx, types.ArbSysAddress, nil) if err != nil { return nil, err } + if len(codeAt) != 0 { + isParentChainArbitrum = true + arbSys = arbSysPrecompile + } } return &HeaderReader{ client: client, @@ -393,6 +395,13 @@ func headerIndicatesFinalitySupport(header *types.Header) bool { return false } +func HeadersEqual(ha, hb *types.Header) bool { + if (ha == nil) != (hb == nil) { + return false + } + return (ha == nil && hb == nil) || ha.Hash() == hb.Hash() +} + func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.Header, error) { c.mutex.Lock() defer c.mutex.Unlock() @@ -400,7 +409,7 @@ func (s *HeaderReader) getCached(ctx context.Context, c *cachedHeader) (*types.H if err != nil { return nil, err } - if currentHead == c.headWhenCached { + if HeadersEqual(currentHead, c.headWhenCached) { return c.header, nil } if !s.config().UseFinalityData || !headerIndicatesFinalitySupport(currentHead) { @@ -455,6 +464,10 @@ func (s *HeaderReader) UseFinalityData() bool { return s.config().UseFinalityData } +func (s *HeaderReader) IsParentChainArbitrum() bool { + return s.isParentChainArbitrum +} + func (s *HeaderReader) Start(ctxIn context.Context) { s.StopWaiter.Start(ctxIn, s) s.LaunchThread(s.broadcastLoop) diff --git a/util/redisutil/redis_coordinator.go b/util/redisutil/redis_coordinator.go index 0ee92fef17..357dfb2e93 100644 
--- a/util/redisutil/redis_coordinator.go +++ b/util/redisutil/redis_coordinator.go @@ -76,6 +76,32 @@ func (c *RedisCoordinator) CurrentChosenSequencer(ctx context.Context) (string, return current, nil } +// GetPriorities returns the priority list of sequencers +func (rc *RedisCoordinator) GetPriorities(ctx context.Context) ([]string, error) { + prioritiesString, err := rc.Client.Get(ctx, PRIORITIES_KEY).Result() + if err != nil { + if errors.Is(err, redis.Nil) { + err = errors.New("sequencer priorities unset") + } + return []string{}, err + } + prioritiesList := strings.Split(prioritiesString, ",") + return prioritiesList, nil +} + +// GetLiveliness returns a map whose keys are sequencers that have their liveliness set to OK +func (rc *RedisCoordinator) GetLiveliness(ctx context.Context) ([]string, error) { + livelinessList, _, err := rc.Client.Scan(ctx, 0, WANTS_LOCKOUT_KEY_PREFIX+"*", 0).Result() + if err != nil { + return []string{}, err + } + for i, elem := range livelinessList { + url := strings.TrimPrefix(elem, WANTS_LOCKOUT_KEY_PREFIX) + livelinessList[i] = url + } + return livelinessList, nil +} + func MessageKeyFor(pos arbutil.MessageIndex) string { return fmt.Sprintf("%s%d", MESSAGE_KEY_PREFIX, pos) } diff --git a/util/signature/simple_hmac.go b/util/signature/simple_hmac.go index b1c683742b..4899b5c22c 100644 --- a/util/signature/simple_hmac.go +++ b/util/signature/simple_hmac.go @@ -58,7 +58,7 @@ func SimpleHmacDangerousConfigAddOptions(prefix string, f *flag.FlagSet) { func SimpleHmacConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".signing-key", EmptySimpleHmacConfig.SigningKey, "a 32-byte (64-character) hex string used to sign messages, or a path to a file containing it") - f.String(prefix+".fallback-verification-key", EmptySimpleHmacConfig.SigningKey, "a fallback key used for message verification") + f.String(prefix+".fallback-verification-key", EmptySimpleHmacConfig.FallbackVerificationKey, "a fallback key used for message verification") SimpleHmacDangerousConfigAddOptions(prefix+".dangerous", f) } diff --git a/validator/server_api/valiation_api.go b/validator/server_api/valiation_api.go index 9e5191ec81..ca5aafcee2 100644 --- a/validator/server_api/valiation_api.go +++ b/validator/server_api/valiation_api.go @@ -91,7 +91,7 @@ func (a *ExecServerAPI) LatestWasmModuleRoot(ctx context.Context) (common.Hash, } func (a *ExecServerAPI) removeOldRuns(ctx context.Context) time.Duration { - oldestKept := time.Now().Add(-1 * a.config().ExecRunTimeout) + oldestKept := time.Now().Add(-1 * a.config().ExecutionRunTimeout) a.runIdLock.Lock() defer a.runIdLock.Unlock() for id, entry := range a.runs { @@ -99,7 +99,7 @@ func (a *ExecServerAPI) removeOldRuns(ctx context.Context) time.Duration { delete(a.runs, id) } } - return a.config().ExecRunTimeout / 5 + return a.config().ExecutionRunTimeout / 5 } func (a *ExecServerAPI) Start(ctx_in context.Context) { diff --git a/validator/server_arb/validator_spawner.go b/validator/server_arb/validator_spawner.go index 163ddb073a..ab04942871 100644 --- a/validator/server_arb/validator_spawner.go +++ b/validator/server_arb/validator_spawner.go @@ -24,24 +24,24 @@ import ( ) type ArbitratorSpawnerConfig struct { - Workers int `koanf:"workers" reload:"hot"` - OutputPath string `koanf:"output-path" reload:"hot"` - Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only - ExecRunTimeout time.Duration `koanf:"execution-run-timeout" reload:"hot"` + Workers int `koanf:"workers" reload:"hot"` 
+ OutputPath string `koanf:"output-path" reload:"hot"` + Execution MachineCacheConfig `koanf:"execution" reload:"hot"` // hot reloading for new executions only + ExecutionRunTimeout time.Duration `koanf:"execution-run-timeout" reload:"hot"` } type ArbitratorSpawnerConfigFecher func() *ArbitratorSpawnerConfig var DefaultArbitratorSpawnerConfig = ArbitratorSpawnerConfig{ - Workers: 0, - OutputPath: "./target/output", - Execution: DefaultMachineCacheConfig, - ExecRunTimeout: time.Minute * 15, + Workers: 0, + OutputPath: "./target/output", + Execution: DefaultMachineCacheConfig, + ExecutionRunTimeout: time.Minute * 15, } func ArbitratorSpawnerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".workers", DefaultArbitratorSpawnerConfig.Workers, "number of concurrent validation threads") - f.Duration(prefix+".execution-run-timeout", DefaultArbitratorSpawnerConfig.ExecRunTimeout, "timeout before discarding execution run") + f.Duration(prefix+".execution-run-timeout", DefaultArbitratorSpawnerConfig.ExecutionRunTimeout, "timeout before discarding execution run") f.String(prefix+".output-path", DefaultArbitratorSpawnerConfig.OutputPath, "path to write machines to") MachineCacheConfigConfigAddOptions(prefix+".execution", f) } diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 913eae81f3..cd277387a0 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -60,6 +60,7 @@ type BroadcasterConfig struct { EnableCompression bool `koanf:"enable-compression" reload:"hot"` // if reloaded to false will cause disconnection of clients with enabled compression on next broadcast RequireCompression bool `koanf:"require-compression" reload:"hot"` // if reloaded to true will cause disconnection of clients with disabled compression on next broadcast LimitCatchup bool `koanf:"limit-catchup" reload:"hot"` + MaxCatchup int `koanf:"max-catchup" reload:"hot"` ConnectionLimits ConnectionLimiterConfig `koanf:"connection-limits" reload:"hot"` ClientDelay time.Duration `koanf:"client-delay" reload:"hot"` } @@ -83,7 +84,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.String(prefix+".port", DefaultBroadcasterConfig.Port, "port to bind the relay feed output to") f.Duration(prefix+".ping", DefaultBroadcasterConfig.Ping, "duration for ping interval") f.Duration(prefix+".client-timeout", DefaultBroadcasterConfig.ClientTimeout, "duration to wait before timing out connections to client") - f.Int(prefix+".queue", DefaultBroadcasterConfig.Queue, "queue size") + f.Int(prefix+".queue", DefaultBroadcasterConfig.Queue, "queue size for HTTP to WS upgrade") f.Int(prefix+".workers", DefaultBroadcasterConfig.Workers, "number of threads to reserve for HTTP to WS upgrade") f.Int(prefix+".max-send-queue", DefaultBroadcasterConfig.MaxSendQueue, "maximum number of messages allowed to accumulate before client is disconnected") f.Bool(prefix+".require-version", DefaultBroadcasterConfig.RequireVersion, "don't connect if client version not present") @@ -93,6 +94,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable-compression", DefaultBroadcasterConfig.EnableCompression, "enable per message deflate compression support") f.Bool(prefix+".require-compression", DefaultBroadcasterConfig.RequireCompression, "require clients to use compression") f.Bool(prefix+".limit-catchup", DefaultBroadcasterConfig.LimitCatchup, "only supply catchup buffer if requested sequence number is reasonable") + 
f.Int(prefix+".max-catchup", DefaultBroadcasterConfig.MaxCatchup, "the maximum size of the catchup buffer (-1 means unlimited)") ConnectionLimiterConfigAddOptions(prefix+".connection-limits", f) f.Duration(prefix+".client-delay", DefaultBroadcasterConfig.ClientDelay, "delay the first messages sent to each client by this amount") } @@ -117,6 +119,7 @@ var DefaultBroadcasterConfig = BroadcasterConfig{ EnableCompression: true, RequireCompression: false, LimitCatchup: false, + MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, } @@ -141,6 +144,7 @@ var DefaultTestBroadcasterConfig = BroadcasterConfig{ EnableCompression: true, RequireCompression: false, LimitCatchup: false, + MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, }
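// Note (illustrative sketch, not part of the patch): the staker now refuses to
// act until the data poster's locally tracked nonce matches the on-chain nonce
// of its sender, so it never races a pending or foreign transaction. A minimal
// version of that readiness check follows; the nonceSource interface and the
// fixedNonce stub are assumptions made for the example, while
// ethclient.Client.NonceAt is the real go-ethereum call.
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// nonceSource stands in for whatever tracks the next nonce locally
// (the data poster, in the patch).
type nonceSource interface {
	NextNonce(ctx context.Context) (uint64, error)
	Sender() common.Address
}

// confirmReady returns an error while the local and on-chain nonces disagree,
// mirroring the shape of the patch's confirmDataPosterIsReady.
func confirmReady(ctx context.Context, src nonceSource, client *ethclient.Client) error {
	local, err := src.NextNonce(ctx)
	if err != nil {
		return err
	}
	onChain, err := client.NonceAt(ctx, src.Sender(), nil) // nil block number = latest
	if err != nil {
		return err
	}
	if local > onChain {
		return fmt.Errorf("local nonce %d ahead of on-chain nonce %d: probably waiting for a pending tx", local, onChain)
	}
	if local < onChain {
		return fmt.Errorf("local nonce %d behind on-chain nonce %d: is something else sending from this address?", local, onChain)
	}
	return nil
}

// fixedNonce is a trivial stand-in used only to exercise confirmReady.
type fixedNonce struct {
	n    uint64
	from common.Address
}

func (f fixedNonce) NextNonce(context.Context) (uint64, error) { return f.n, nil }
func (f fixedNonce) Sender() common.Address                    { return f.from }

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // assumes a local node
	if err != nil {
		panic(err)
	}
	fmt.Println(confirmReady(context.Background(), fixedNonce{n: 7}, client))
}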
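// Note (illustrative sketch, not part of the patch): PendingUpgradeModuleRoot
// is now checked with a regexp before common.HexToHash, because HexToHash
// never returns an error and quietly maps malformed input to some hash value.
// This sketch uses an anchored pattern; the patch's pattern is unanchored, so
// it only requires that a 64-digit hex run appears somewhere in the string.
package main

import (
	"errors"
	"fmt"
	"regexp"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

var moduleRootRe = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{64}$`)

func parseModuleRoot(s string) (common.Hash, error) {
	if !moduleRootRe.MatchString(s) {
		return common.Hash{}, errors.New("pending-upgrade-module-root config value illegal")
	}
	root := common.HexToHash(s)
	if root == (common.Hash{}) { // an all-zero root is also rejected, as in the patch
		return common.Hash{}, errors.New("pending-upgrade-module-root config value illegal")
	}
	return root, nil
}

func main() {
	for _, s := range []string{"latest", "0xdeadbeef", "0x" + strings.Repeat("ab", 32)} {
		root, err := parseModuleRoot(s)
		fmt.Printf("%-70q -> %v %v\n", s, root, err)
	}
}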
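// Note (illustrative sketch, not part of the patch): NewContractValidatorWallet
// now takes getExtraGas func() uint64 instead of a fixed extraGas value, so the
// gas padding is read at estimation time rather than frozen at construction,
// which matters once extra-gas is hot-reloadable. The estimator type and field
// names below are made up for the example.
package main

import "fmt"

type estimator struct {
	getExtraGas func() uint64
}

func (e *estimator) estimate(base uint64) uint64 {
	return base + e.getExtraGas()
}

func main() {
	extra := uint64(50000)
	e := &estimator{getExtraGas: func() uint64 { return extra }}
	fmt.Println(e.estimate(21000)) // 71000
	extra = 100000                 // pretend the config was hot-reloaded
	fmt.Println(e.estimate(21000)) // 121000 -- the new value is observed
}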
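// Note (illustrative sketch, not part of the patch): the conditional-tx test
// replaces hexutil.Uint64 with math.HexOrDecimal64 for the block-number and
// timestamp bounds. The practical difference is on the decoding side:
// HexOrDecimal64 accepts both "0x10" and "16" as text, whereas the hexutil
// type insists on the 0x form.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	var hexForm, decForm math.HexOrDecimal64
	if err := hexForm.UnmarshalText([]byte("0x10")); err != nil {
		panic(err)
	}
	if err := decForm.UnmarshalText([]byte("16")); err != nil {
		panic(err)
	}
	fmt.Println(uint64(hexForm), uint64(decForm)) // 16 16
}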
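// Note (illustrative sketch, not part of the patch): the header reader's cache
// check moves from pointer equality to the new HeadersEqual helper. Pointer
// equality treats two copies of the same header as different, and comparing
// fields directly would dereference a nil header; hashing handles both. The
// helper body is reproduced from the patch purely to demonstrate the difference.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func headersEqual(ha, hb *types.Header) bool {
	if (ha == nil) != (hb == nil) {
		return false
	}
	return (ha == nil && hb == nil) || ha.Hash() == hb.Hash()
}

func main() {
	h := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(0)}
	copyOfH := *h // same contents, different pointer
	fmt.Println(h == &copyOfH)             // false: pointer identity
	fmt.Println(headersEqual(h, &copyOfH)) // true: identical hash
	fmt.Println(headersEqual(nil, h))      // false, without dereferencing nil
	fmt.Println(headersEqual(nil, nil))    // true
}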
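// Note (illustrative sketch, not part of the patch): the new GetLiveliness
// helper lists sequencers by scanning Redis keys under the wants-lockout
// prefix and stripping the prefix off. The sketch below does the same with
// go-redis; the prefix value is a placeholder, and following the SCAN cursor
// to completion is an assumption for the example (the patch issues a single
// Scan call with cursor 0).
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/go-redis/redis/v8"
)

const wantsLockoutPrefix = "coordinator.liveliness." // placeholder prefix

func liveSequencers(ctx context.Context, client *redis.Client) ([]string, error) {
	var urls []string
	var cursor uint64
	for {
		keys, next, err := client.Scan(ctx, cursor, wantsLockoutPrefix+"*", 0).Result()
		if err != nil {
			return nil, err
		}
		for _, k := range keys {
			urls = append(urls, strings.TrimPrefix(k, wantsLockoutPrefix))
		}
		if next == 0 {
			return urls, nil
		}
		cursor = next
	}
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumes a local Redis
	urls, err := liveSequencers(context.Background(), client)
	fmt.Println(urls, err)
}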