diff --git a/arbitrator/arbutil/src/evm/api.rs b/arbitrator/arbutil/src/evm/api.rs
index f84f92ad96..093e7f2984 100644
--- a/arbitrator/arbutil/src/evm/api.rs
+++ b/arbitrator/arbutil/src/evm/api.rs
@@ -153,7 +153,7 @@ pub trait EvmApi<D: DataReader>: Send + 'static {
     ) -> (eyre::Result, u32, u64);

     /// Returns the EVM return data.
-    /// Analogous to `vm.RETURNDATA`.
+    /// Analogous to `vm.RETURNDATACOPY`.
     fn get_return_data(&self) -> D;

     /// Emits an EVM log with the given number of topics and data, the first bytes of which should be the topic data.
diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs
index 18f9ecec09..f6c3e9fe8f 100644
--- a/arbitrator/prover/src/binary.rs
+++ b/arbitrator/prover/src/binary.rs
@@ -627,9 +627,9 @@ impl<'a> WasmBinary<'a> {
             ink_left: ink_left.as_u32(),
             ink_status: ink_status.as_u32(),
             depth_left: depth_left.as_u32(),
-            init_cost: init.try_into()?,
-            cached_init_cost: cached_init.try_into()?,
-            asm_estimate: asm_estimate.try_into()?,
+            init_cost: init.try_into().wrap_err("init cost too high")?,
+            cached_init_cost: cached_init.try_into().wrap_err("cached cost too high")?,
+            asm_estimate: asm_estimate.try_into().wrap_err("asm estimate too large")?,
             footprint,
             user_main,
         })
diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs
index 0303502645..6b0e1df3e1 100644
--- a/arbitrator/prover/src/machine.rs
+++ b/arbitrator/prover/src/machine.rs
@@ -1739,7 +1739,7 @@ impl Machine {
     pub fn jump_into_func(&mut self, module: u32, func: u32, mut args: Vec<Value>) -> Result<()> {
         let Some(source_module) = self.modules.get(module as usize) else {
-            bail!("no module at offest {}", module.red())
+            bail!("no module at offset {}", module.red())
         };
         let Some(source_func) = source_module.funcs.get(func as usize) else {
             bail!(
diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go
index 0a9a45cc1e..ee00cdc618 100644
--- a/arbnode/batch_poster.go
+++ b/arbnode/batch_poster.go
@@ -38,10 +38,10 @@ import (
 	"github.com/offchainlabs/nitro/arbnode/redislock"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
 	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/cmd/chaininfo"
 	"github.com/offchainlabs/nitro/cmd/genericconf"
-	"github.com/offchainlabs/nitro/das"
 	"github.com/offchainlabs/nitro/execution"
 	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
 	"github.com/offchainlabs/nitro/util"
@@ -98,7 +98,7 @@ type BatchPoster struct {
 	bridgeAddr         common.Address
 	gasRefunderAddr    common.Address
 	building           *buildingBatch
-	daWriter           das.DataAvailabilityServiceWriter
+	dapWriter          daprovider.Writer
 	dataPoster         *dataposter.DataPoster
 	redisLock          *redislock.Simple
 	messagesPerBatch   *arbmath.MovingAverage[uint64]
@@ -129,7 +129,7 @@ const (

 type BatchPosterConfig struct {
 	Enable bool `koanf:"enable"`
-	DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"`
+	DisableDapFallbackStoreDataOnChain bool `koanf:"disable-dap-fallback-store-data-on-chain" reload:"hot"`
 	// Max batch size.
 	MaxSize int `koanf:"max-size" reload:"hot"`
 	// Maximum 4844 blob enabled batch size.
@@ -189,7 +189,7 @@ type BatchPosterConfigFetcher func() *BatchPosterConfig

 func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {
 	f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1")
-	f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain")
+	f.Bool(prefix+".disable-dap-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDapFallbackStoreDataOnChain, "If unable to batch to DA provider, disable fallback storing data on chain")
 	f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size")
 	f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size")
 	f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay")
@@ -214,7 +214,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) {

 var DefaultBatchPosterConfig = BatchPosterConfig{
 	Enable: false,
-	DisableDasFallbackStoreDataOnChain: false,
+	DisableDapFallbackStoreDataOnChain: false,
 	// This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go
 	MaxSize: 100000,
 	Max4844BatchSize: blobs.BlobEncodableData*(params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - 2000,
@@ -277,7 +277,7 @@ type BatchPosterOpts struct {
 	Config        BatchPosterConfigFetcher
 	DeployInfo    *chaininfo.RollupAddresses
 	TransactOpts  *bind.TransactOpts
-	DAWriter      das.DataAvailabilityServiceWriter
+	DAPWriter     daprovider.Writer
 	ParentChainID *big.Int
 }

@@ -323,7 +323,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e
 		seqInboxAddr:    opts.DeployInfo.SequencerInbox,
 		gasRefunderAddr: opts.Config().gasRefunder,
 		bridgeAddr:      opts.DeployInfo.Bridge,
-		daWriter:        opts.DAWriter,
+		dapWriter:       opts.DAPWriter,
 		redisLock:       redisLock,
 	}
 	b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20)
@@ -883,7 +883,7 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) {
 	}
 	compressedBytes := s.compressedBuffer.Bytes()
 	fullMsg := make([]byte, 1, len(compressedBytes)+1)
-	fullMsg[0] = arbstate.BrotliMessageHeaderByte
+	fullMsg[0] = daprovider.BrotliMessageHeaderByte
 	fullMsg = append(fullMsg, compressedBytes...)
 	return fullMsg, nil
 }

@@ -1063,7 +1063,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 	}
 	var use4844 bool
 	config := b.config()
-	if config.Post4844Blobs && b.daWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil {
+	if config.Post4844Blobs && b.dapWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil {
 		arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1)))
 		if err != nil {
 			return false, err
@@ -1246,7 +1246,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		return false, nil
 	}

-	if b.daWriter != nil {
+	if b.dapWriter != nil {
 		if !b.redisLock.AttemptLock(ctx) {
 			return false, errAttemptLockFailed
 		}
@@ -1258,17 +1258,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error)
 		if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) {
 			return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
 		}
-
-		cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled
-		if errors.Is(err, das.BatchToDasFailed) {
-			if config.DisableDasFallbackStoreDataOnChain {
-				return false, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
-			}
-			log.Warn("Falling back to storing data on chain", "err", err)
-		} else if err != nil {
+		sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain)
+		if err != nil {
 			return false, err
-		} else {
-			sequencerMsg = das.Serialize(cert)
 		}
 	}
diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go
index beb2656e2b..9ad984ae6c 100644
--- a/arbnode/delayed_seq_reorg_test.go
+++ b/arbnode/delayed_seq_reorg_test.go
@@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) {
 	defer cancel()

 	exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{})
-	tracker, err := NewInboxTracker(db, streamer, nil, nil)
+	tracker, err := NewInboxTracker(db, streamer, nil)
 	Require(t, err)

 	err = streamer.Start(ctx)
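The batch poster hunk above collapses the DAS-specific branching (store, check `das.BatchToDasFailed`, maybe fall back, maybe serialize a certificate) into one `dapWriter.Store` call that takes the fallback policy as an argument. A minimal caller-side sketch of that contract; `storeBatch` and its parameters are hypothetical, not part of the diff:

```go
package example

import (
	"context"
	"time"

	"github.com/offchainlabs/nitro/arbstate/daprovider"
)

// storeBatch returns the bytes to post on chain: a serialized DAS
// certificate on success, or the original batch data when the DA store
// failed and fallback is allowed (per the Writer contract in this diff).
func storeBatch(ctx context.Context, w daprovider.Writer, batch []byte, retention time.Duration, disableFallback bool) ([]byte, error) {
	timeout := uint64(time.Now().Add(retention).Unix())
	return w.Store(ctx, batch, timeout, []byte{}, disableFallback)
}
```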
unconfigured") - } +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders []daprovider.Reader) (*InboxTracker, error) { tracker := &InboxTracker{ db: db, txStreamer: txStreamer, - das: das, - blobReader: blobReader, + dapReaders: dapReaders, batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil @@ -659,14 +654,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - var daProviders []arbstate.DataAvailabilityProvider - if t.das != nil { - daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) - } - if t.blobReader != nil { - daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) - } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.dapReaders, daprovider.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index 43a05155fe..bc01445828 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -27,7 +27,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -254,7 +254,7 @@ type Node struct { L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses - BlobReader arbstate.BlobReader + BlobReader daprovider.BlobReader InboxReader *InboxReader InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer @@ -372,7 +372,7 @@ func createNodeImpl( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, - blobReader arbstate.BlobReader, + blobReader daprovider.BlobReader, ) (*Node, error) { config := configFetcher.Get() @@ -529,7 +529,18 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) + // We support a nil txStreamer for the pruning code + if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil { + return nil, errors.New("data availability service required but unconfigured") + } + var dapReaders []daprovider.Reader + if daReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader)) + } + if blobReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) + } + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders) if err != nil { return nil, err } @@ -547,8 +558,7 @@ func createNodeImpl( txStreamer, exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), - daReader, - blobReader, + dapReaders, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) @@ -655,6 +665,10 @@ func createNodeImpl( if txOptsBatchPoster == nil && config.BatchPoster.DataPoster.ExternalSigner.URL == "" { return nil, errors.New("batchposter, but no TxOpts") } + var dapWriter daprovider.Writer + if daWriter != nil { + dapWriter 
diff --git a/arbnode/node.go b/arbnode/node.go
index 43a05155fe..bc01445828 100644
--- a/arbnode/node.go
+++ b/arbnode/node.go
@@ -27,7 +27,7 @@ import (
 	"github.com/offchainlabs/nitro/arbnode/dataposter/storage"
 	"github.com/offchainlabs/nitro/arbnode/resourcemanager"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
-	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/broadcastclient"
 	"github.com/offchainlabs/nitro/broadcastclients"
@@ -254,7 +254,7 @@ type Node struct {
 	L1Reader         *headerreader.HeaderReader
 	TxStreamer       *TransactionStreamer
 	DeployInfo       *chaininfo.RollupAddresses
-	BlobReader       arbstate.BlobReader
+	BlobReader       daprovider.BlobReader
 	InboxReader      *InboxReader
 	InboxTracker     *InboxTracker
 	DelayedSequencer *DelayedSequencer
@@ -372,7 +372,7 @@ func createNodeImpl(
 	dataSigner signature.DataSignerFunc,
 	fatalErrChan chan error,
 	parentChainID *big.Int,
-	blobReader arbstate.BlobReader,
+	blobReader daprovider.BlobReader,
 ) (*Node, error) {
 	config := configFetcher.Get()

@@ -529,7 +529,18 @@ func createNodeImpl(
 		return nil, errors.New("a data availability service is required for this chain, but it was not configured")
 	}

-	inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader)
+	// We support a nil txStreamer for the pruning code
+	if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil {
+		return nil, errors.New("data availability service required but unconfigured")
+	}
+	var dapReaders []daprovider.Reader
+	if daReader != nil {
+		dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader))
+	}
+	if blobReader != nil {
+		dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader))
+	}
+	inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders)
 	if err != nil {
 		return nil, err
 	}
@@ -547,8 +558,7 @@ func createNodeImpl(
 		txStreamer,
 		exec,
 		rawdb.NewTable(arbDb, storage.BlockValidatorPrefix),
-		daReader,
-		blobReader,
+		dapReaders,
 		func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator },
 		stack,
 	)
@@ -655,6 +665,10 @@ func createNodeImpl(
 		if txOptsBatchPoster == nil && config.BatchPoster.DataPoster.ExternalSigner.URL == "" {
 			return nil, errors.New("batchposter, but no TxOpts")
 		}
+		var dapWriter daprovider.Writer
+		if daWriter != nil {
+			dapWriter = daprovider.NewWriterForDAS(daWriter)
+		}
 		batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{
 			DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix),
 			L1Reader:     l1Reader,
@@ -665,7 +679,7 @@ func createNodeImpl(
 			Config:        func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster },
 			DeployInfo:    deployInfo,
 			TransactOpts:  txOptsBatchPoster,
-			DAWriter:      daWriter,
+			DAPWriter:     dapWriter,
 			ParentChainID: parentChainID,
 		})
 		if err != nil {
@@ -725,7 +739,7 @@ func CreateNode(
 	dataSigner signature.DataSignerFunc,
 	fatalErrChan chan error,
 	parentChainID *big.Int,
-	blobReader arbstate.BlobReader,
+	blobReader daprovider.BlobReader,
 ) (*Node, error) {
 	currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader)
 	if err != nil {
diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go
index edda4e5512..46e1edb78b 100644
--- a/arbnode/sequencer_inbox.go
+++ b/arbnode/sequencer_inbox.go
@@ -15,7 +15,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/arbutil"

 	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
@@ -159,7 +159,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut
 		if len(tx.BlobHashes()) == 0 {
 			return nil, fmt.Errorf("blob batch transaction %v has no blobs", tx.Hash())
 		}
-		data := []byte{arbstate.BlobHashesHeaderFlag}
+		data := []byte{daprovider.BlobHashesHeaderFlag}
 		for _, h := range tx.BlobHashes() {
 			data = append(data, h[:]...)
 		}
diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go
new file mode 100644
index 0000000000..560af3af1d
--- /dev/null
+++ b/arbstate/daprovider/reader.go
@@ -0,0 +1,104 @@
+// Copyright 2021-2022, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package daprovider
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/offchainlabs/nitro/arbutil"
+	"github.com/offchainlabs/nitro/util/blobs"
+)
+
+type Reader interface {
+	// IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider
+	IsValidHeaderByte(headerByte byte) bool
+
+	// RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information
+	RecoverPayloadFromBatch(
+		ctx context.Context,
+		batchNum uint64,
+		batchBlockHash common.Hash,
+		sequencerMsg []byte,
+		preimageRecorder PreimageRecorder,
+		validateSeqMsg bool,
+	) ([]byte, error)
+}
+
+// NewReaderForDAS is generally meant to be only used by nitro.
+// DA Providers should implement methods in the Reader interface independently
+func NewReaderForDAS(dasReader DASReader) *readerForDAS {
+	return &readerForDAS{dasReader: dasReader}
+}
+
+type readerForDAS struct {
+	dasReader DASReader
+}
+
+func (d *readerForDAS) IsValidHeaderByte(headerByte byte) bool {
+	return IsDASMessageHeaderByte(headerByte)
+}
+
+func (d *readerForDAS) RecoverPayloadFromBatch(
+	ctx context.Context,
+	batchNum uint64,
+	batchBlockHash common.Hash,
+	sequencerMsg []byte,
+	preimageRecorder PreimageRecorder,
+	validateSeqMsg bool,
+) ([]byte, error) {
+	return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, preimageRecorder, validateSeqMsg)
+}
+
+// NewReaderForBlobReader is generally meant to be only used by nitro.
+// DA Providers should implement methods in the Reader interface independently
+func NewReaderForBlobReader(blobReader BlobReader) *readerForBlobReader {
+	return &readerForBlobReader{blobReader: blobReader}
+}
+
+type readerForBlobReader struct {
+	blobReader BlobReader
+}
+
+func (b *readerForBlobReader) IsValidHeaderByte(headerByte byte) bool {
+	return IsBlobHashesHeaderByte(headerByte)
+}
+
+func (b *readerForBlobReader) RecoverPayloadFromBatch(
+	ctx context.Context,
+	batchNum uint64,
+	batchBlockHash common.Hash,
+	sequencerMsg []byte,
+	preimageRecorder PreimageRecorder,
+	validateSeqMsg bool,
+) ([]byte, error) {
+	blobHashes := sequencerMsg[41:]
+	if len(blobHashes)%len(common.Hash{}) != 0 {
+		return nil, ErrInvalidBlobDataFormat
+	}
+	versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{}))
+	for i := 0; i*32 < len(blobHashes); i += 1 {
+		copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32])
+	}
+	kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get blobs: %w", err)
+	}
+	if preimageRecorder != nil {
+		for i, blob := range kzgBlobs {
+			// Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable
+			// Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview
+			b := blob
+			preimageRecorder(versionedHashes[i], b[:], arbutil.EthVersionedHashPreimageType)
+		}
+	}
+	payload, err := blobs.DecodeBlobs(kzgBlobs)
+	if err != nil {
+		log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err)
+		return nil, nil
+	}
+	return payload, nil
+}
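The `Reader` interface above is the extension point for external DA providers. A skeletal, hypothetical implementation under assumptions not in the diff (the `0x42` header byte is invented; a real provider must pick a byte that doesn't collide with nitro's reserved header flag bits):

```go
package myprovider

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/offchainlabs/nitro/arbstate/daprovider"
)

type exampleReader struct{}

// Compile-time check that the skeleton satisfies the interface.
var _ daprovider.Reader = (*exampleReader)(nil)

func (r *exampleReader) IsValidHeaderByte(headerByte byte) bool {
	return headerByte == 0x42 // invented flag, for illustration only
}

func (r *exampleReader) RecoverPayloadFromBatch(
	ctx context.Context,
	batchNum uint64,
	batchBlockHash common.Hash,
	sequencerMsg []byte,
	preimageRecorder daprovider.PreimageRecorder,
	validateSeqMsg bool,
) ([]byte, error) {
	// A real implementation would resolve sequencerMsg[40:] against its DA
	// backend and, when preimageRecorder != nil, record fetched preimages.
	return nil, nil
}
```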
diff --git a/arbstate/das_reader.go b/arbstate/daprovider/util.go
similarity index 62%
rename from arbstate/das_reader.go
rename to arbstate/daprovider/util.go
index f131a53608..054bde5503 100644
--- a/arbstate/das_reader.go
+++ b/arbstate/daprovider/util.go
@@ -1,7 +1,7 @@
 // Copyright 2021-2022, Offchain Labs, Inc.
 // For license information, see https://github.com/nitro/blob/master/LICENSE

-package arbstate
+package daprovider

 import (
 	"bufio"
@@ -13,18 +13,53 @@ import (
 	"io"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/crypto/kzg4844"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/offchainlabs/nitro/arbos/util"
+	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/blsSignatures"
 	"github.com/offchainlabs/nitro/das/dastree"
 )

-type DataAvailabilityReader interface {
+type DASReader interface {
 	GetByHash(ctx context.Context, hash common.Hash) ([]byte, error)
 	ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error)
 }

-var ErrHashMismatch = errors.New("result does not match expected hash")
+type DASWriter interface {
+	// Store requests that the message be stored until timeout (UTC time in unix epoch seconds).
+	Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*DataAvailabilityCertificate, error)
+	fmt.Stringer
+}
+
+type BlobReader interface {
+	GetBlobs(
+		ctx context.Context,
+		batchBlockHash common.Hash,
+		versionedHashes []common.Hash,
+	) ([]kzg4844.Blob, error)
+	Initialize(ctx context.Context) error
+}
+
+// PreimageRecorder adds a (key, value) pair to the inner map preimages[ty] of a bigger preimages map;
+// if ty doesn't exist as a key in the preimages map, it is initialized to map[common.Hash][]byte before the (key, value) pair is added
+type PreimageRecorder func(key common.Hash, value []byte, ty arbutil.PreimageType)
+
+// RecordPreimagesTo takes in a preimages map and returns a function that can be used
+// to record (hash, preimage) key-value pairs into it when fetching a payload through RecoverPayloadFromBatch
+func RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder {
+	if preimages == nil {
+		return nil
+	}
+	return func(key common.Hash, value []byte, ty arbutil.PreimageType) {
+		if preimages[ty] == nil {
+			preimages[ty] = make(map[common.Hash][]byte)
+		}
+		preimages[ty][key] = value
+	}
+}

 // DASMessageHeaderFlag indicates that this data is a certificate for the data availability service,
 // which will retrieve the full batch data.
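`RecordPreimagesTo` replaces the old pattern of threading the whole preimages map through recovery. A sketch of the intended wiring (the helper name and parameters are illustrative):

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/offchainlabs/nitro/arbstate/daprovider"
	"github.com/offchainlabs/nitro/arbutil"
)

// recoverRecordingPreimages recovers a batch payload while capturing every
// (hash, preimage) pair the reader touches; the recorder lazily creates the
// inner map for each preimage type.
func recoverRecordingPreimages(ctx context.Context, r daprovider.Reader, batchNum uint64, blockHash common.Hash, seqMsg []byte) (map[arbutil.PreimageType]map[common.Hash][]byte, []byte, error) {
	preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte)
	recorder := daprovider.RecordPreimagesTo(preimages)
	payload, err := r.RecoverPayloadFromBatch(ctx, batchNum, blockHash, seqMsg, recorder, true)
	return preimages, payload, err
}
```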
@@ -83,6 +118,114 @@ func IsKnownHeaderByte(b uint8) bool {
 	return b&^KnownHeaderBits == 0
 }

+const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week
+var (
+	ErrHashMismatch          = errors.New("result does not match expected hash")
+	ErrBatchToDasFailed      = errors.New("unable to batch to DAS")
+	ErrNoBlobReader          = errors.New("blob batch payload was encountered but no BlobReader was configured")
+	ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected")
+	ErrSeqMsgValidation      = errors.New("error validating recovered payload from batch")
+)
+
+type KeysetValidationMode uint8
+
+const KeysetValidate KeysetValidationMode = 0
+const KeysetPanicIfInvalid KeysetValidationMode = 1
+const KeysetDontValidate KeysetValidationMode = 2
+
+func RecoverPayloadFromDasBatch(
+	ctx context.Context,
+	batchNum uint64,
+	sequencerMsg []byte,
+	dasReader DASReader,
+	preimageRecorder PreimageRecorder,
+	validateSeqMsg bool,
+) ([]byte, error) {
+	cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:]))
+	if err != nil {
+		log.Error("Failed to deserialize DAS message", "err", err)
+		return nil, nil
+	}
+	version := cert.Version
+
+	if version >= 2 {
+		log.Error("Your node software is probably out of date", "certificateVersion", version)
+		return nil, nil
+	}
+
+	getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) {
+		newHash := hash
+		if version == 0 {
+			newHash = dastree.FlatHashToTreeHash(hash)
+		}
+
+		preimage, err := dasReader.GetByHash(ctx, newHash)
+		if err != nil && hash != newHash {
+			log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err)
+			preimage, err = dasReader.GetByHash(ctx, hash)
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		switch {
+		case version == 0 && crypto.Keccak256Hash(preimage) != hash:
+			fallthrough
+		case version == 1 && dastree.Hash(preimage) != hash:
+			log.Error(
+				"preimage mismatch for hash",
+				"hash", hash, "err", ErrHashMismatch, "version", version,
+			)
+			return nil, ErrHashMismatch
+		}
+		return preimage, nil
+	}
+
+	keysetPreimage, err := getByHash(ctx, cert.KeysetHash)
+	if err != nil {
+		log.Error("Couldn't get keyset", "err", err)
+		return nil, err
+	}
+	if preimageRecorder != nil {
+		dastree.RecordHash(preimageRecorder, keysetPreimage)
+	}
+
+	keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), !validateSeqMsg)
+	if err != nil {
+		return nil, fmt.Errorf("%w. Couldn't deserialize keyset, err: %w, keyset hash: %x batch num: %d", ErrSeqMsgValidation, err, cert.KeysetHash, batchNum)
+	}
+	err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig)
+	if err != nil {
+		log.Error("Bad signature on DAS batch", "err", err)
+		return nil, nil
+	}
+
+	maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16])
+	if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert {
+		log.Error("Data availability cert expires too soon", "err", "")
+		return nil, nil
+	}
+
+	dataHash := cert.DataHash
+	payload, err := getByHash(ctx, dataHash)
+	if err != nil {
+		log.Error("Couldn't fetch DAS batch contents", "err", err)
+		return nil, err
+	}
+
+	if preimageRecorder != nil {
+		if version == 0 {
+			treeLeaf := dastree.FlatHashToTreeLeaf(dataHash)
+			preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType)
+			preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf, arbutil.Keccak256PreimageType)
+		} else {
+			dastree.RecordHash(preimageRecorder, payload)
+		}
+	}
+
+	return payload, nil
+}
+
 type DataAvailabilityCertificate struct {
 	KeysetHash  [32]byte
 	DataHash    [32]byte
@@ -167,7 +310,7 @@ func (c *DataAvailabilityCertificate) SerializeSignableFields() []byte {

 func (c *DataAvailabilityCertificate) RecoverKeyset(
 	ctx context.Context,
-	da DataAvailabilityReader,
+	da DASReader,
 	assumeKeysetValid bool,
 ) (*DataAvailabilityKeyset, error) {
 	keysetBytes, err := da.GetByHash(ctx, c.KeysetHash)
@@ -316,3 +459,22 @@ func StringToExpirationPolicy(s string) (ExpirationPolicy, error) {
 		return -1, fmt.Errorf("invalid Expiration Policy: %s", s)
 	}
 }
+
+func Serialize(c *DataAvailabilityCertificate) []byte {
+
+	flags := DASMessageHeaderFlag
+	if c.Version != 0 {
+		flags |= TreeDASMessageHeaderFlag
+	}
+
+	buf := make([]byte, 0)
+	buf = append(buf, flags)
+	buf = append(buf, c.KeysetHash[:]...)
+	buf = append(buf, c.SerializeSignableFields()...)
+
+	var intData [8]byte
+	binary.BigEndian.PutUint64(intData[:], c.SignersMask)
+	buf = append(buf, intData[:]...)
+
+	return append(buf, blsSignatures.SignatureToBytes(c.Sig)...)
+}
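`Serialize` moves here from the `das` package so writers can produce certificate bytes without importing `das`. Assuming, as the code above suggests, that `DeserializeDASCertFrom` consumes exactly the encoding `Serialize` emits (header flag byte, then the certificate fields and BLS signature), the two should round-trip:

```go
package example

import (
	"bytes"

	"github.com/offchainlabs/nitro/arbstate/daprovider"
)

// roundTrip re-parses a just-serialized certificate; a sketch of the
// assumed symmetry between Serialize and DeserializeDASCertFrom.
func roundTrip(cert *daprovider.DataAvailabilityCertificate) (*daprovider.DataAvailabilityCertificate, error) {
	return daprovider.DeserializeDASCertFrom(bytes.NewReader(daprovider.Serialize(cert)))
}
```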
diff --git a/arbstate/daprovider/writer.go b/arbstate/daprovider/writer.go
new file mode 100644
index 0000000000..75b356c4b8
--- /dev/null
+++ b/arbstate/daprovider/writer.go
@@ -0,0 +1,48 @@
+// Copyright 2021-2022, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package daprovider
+
+import (
+	"context"
+	"errors"
+
+	"github.com/ethereum/go-ethereum/log"
+)
+
+type Writer interface {
+	// Store posts the batch data to the invoking DA provider
+	// and returns sequencerMsg, which is later used to retrieve the batch data
+	Store(
+		ctx context.Context,
+		message []byte,
+		timeout uint64,
+		sig []byte,
+		disableFallbackStoreDataOnChain bool,
+	) ([]byte, error)
+}
+
+// NewWriterForDAS is generally meant to be only used by nitro.
+// DA Providers should implement methods in the Writer interface independently
+func NewWriterForDAS(dasWriter DASWriter) *writerForDAS {
+	return &writerForDAS{dasWriter: dasWriter}
+}
+
+type writerForDAS struct {
+	dasWriter DASWriter
+}
+
+func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte, disableFallbackStoreDataOnChain bool) ([]byte, error) {
+	cert, err := d.dasWriter.Store(ctx, message, timeout, []byte{})
+	if errors.Is(err, ErrBatchToDasFailed) {
+		if disableFallbackStoreDataOnChain {
+			return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
+		}
+		log.Warn("Falling back to storing data on chain", "err", err)
+		return message, nil
+	} else if err != nil {
+		return nil, err
+	} else {
+		return Serialize(cert), nil
+	}
+}
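One consequence of folding fallback into `Store` is that callers can no longer tell from the error whether fallback happened; they can only compare bytes. A hedged sketch (the helper and its arguments are hypothetical):

```go
package example

import (
	"bytes"
	"context"

	"github.com/offchainlabs/nitro/arbstate/daprovider"
)

// storedOnChain reports whether the DAS-backed writer fell back to on-chain
// data: on fallback Store returns the input message unchanged, on success
// it returns a serialized certificate instead.
func storedOnChain(ctx context.Context, dasWriter daprovider.DASWriter, msg []byte, timeout uint64) (bool, error) {
	out, err := daprovider.NewWriterForDAS(dasWriter).Store(ctx, msg, timeout, []byte{}, false)
	if err != nil {
		return false, err
	}
	return bytes.Equal(out, msg), nil
}
```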
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index 3105ee92b1..753ca19cd6 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -13,8 +13,6 @@ import (
 	"math/big"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/crypto/kzg4844"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"

@@ -22,9 +20,7 @@ import (
 	"github.com/offchainlabs/nitro/arbos/arbosState"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
 	"github.com/offchainlabs/nitro/arbos/l1pricing"
-	"github.com/offchainlabs/nitro/arbutil"
-	"github.com/offchainlabs/nitro/das/dastree"
-	"github.com/offchainlabs/nitro/util/blobs"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/zeroheavy"
 )

@@ -40,15 +36,6 @@ type InboxBackend interface {
 	ReadDelayedInbox(seqNum uint64) (*arbostypes.L1IncomingMessage, error)
 }

-type BlobReader interface {
-	GetBlobs(
-		ctx context.Context,
-		batchBlockHash common.Hash,
-		versionedHashes []common.Hash,
-	) ([]kzg4844.Blob, error)
-	Initialize(ctx context.Context) error
-}
-
 type sequencerMessage struct {
 	minTimestamp uint64
 	maxTimestamp uint64
@@ -61,14 +48,8 @@ type sequencerMessage struct {
 const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB
 const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64
 const MaxSegmentsPerSequencerMessage = 100 * 1024
-const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week

-var (
-	ErrNoBlobReader          = errors.New("blob batch payload was encountered but no BlobReader was configured")
-	ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected")
-)
-
-func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) {
+func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*sequencerMessage, error) {
 	if len(data) < 40 {
 		return nil, errors.New("sequencer message missing L1 header")
 	}
@@ -86,22 +67,32 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	// If the parent chain sequencer inbox smart contract authenticated this batch,
 	// an unknown header byte must mean that this node is out of date,
 	// because the smart contract understands the header byte and this node doesn't.
-	if len(payload) > 0 && IsL1AuthenticatedMessageHeaderByte(payload[0]) && !IsKnownHeaderByte(payload[0]) {
+	if len(payload) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(payload[0]) && !daprovider.IsKnownHeaderByte(payload[0]) {
 		return nil, fmt.Errorf("%w: batch has unsupported authenticated header byte 0x%02x", arbosState.ErrFatalNodeOutOfDate, payload[0])
 	}

 	// Stage 1: Extract the payload from any data availability header.
 	// It's important that multiple DAS strategies can't both be invoked in the same batch,
 	// as these headers are validated by the sequencer inbox and not other DASs.
-	// We try to extract payload from the first occuring valid DA provider in the daProviders list
+	// We try to extract the payload from the first occurring valid DA reader in the dapReaders list
 	if len(payload) > 0 {
 		foundDA := false
 		var err error
-		for _, provider := range daProviders {
-			if provider != nil && provider.IsValidHeaderByte(payload[0]) {
-				payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode)
+		for _, dapReader := range dapReaders {
+			if dapReader != nil && dapReader.IsValidHeaderByte(payload[0]) {
+				payload, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate)
 				if err != nil {
-					return nil, err
+					// Matches the way keyset validation was done inside DAS readers, i.e. logging the error,
+					// but other DA providers might just want to return the error
+					if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(payload[0]) {
+						logLevel := log.Error
+						if keysetValidationMode == daprovider.KeysetPanicIfInvalid {
+							logLevel = log.Crit
+						}
+						logLevel(err.Error())
+					} else {
+						return nil, err
+					}
 				}
 				if payload == nil {
 					return parsedMsg, nil
@@ -112,10 +103,10 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	}

 	if !foundDA {
-		if IsDASMessageHeaderByte(payload[0]) {
+		if daprovider.IsDASMessageHeaderByte(payload[0]) {
 			log.Error("No DAS Reader configured, but sequencer message found with DAS header")
-		} else if IsBlobHashesHeaderByte(payload[0]) {
-			return nil, ErrNoBlobReader
+		} else if daprovider.IsBlobHashesHeaderByte(payload[0]) {
+			return nil, daprovider.ErrNoBlobReader
 		}
 	}

@@ -124,7 +115,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	// It's not safe to trust any part of the payload from this point onwards.

 	// Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging).
-	if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) {
+	if len(payload) > 0 && daprovider.IsZeroheavyEncodedHeaderByte(payload[0]) {
 		pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen)))
 		if err != nil {
 			log.Warn("error reading from zeroheavy decoder", err.Error())
@@ -134,7 +125,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	}

 	// Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list.
-	if len(payload) > 0 && IsBrotliMessageHeaderByte(payload[0]) {
+	if len(payload) > 0 && daprovider.IsBrotliMessageHeaderByte(payload[0]) {
 		decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen)
 		if err == nil {
 			reader := bytes.NewReader(decompressed)
@@ -170,224 +161,24 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	return parsedMsg, nil
 }

-func RecoverPayloadFromDasBatch(
-	ctx context.Context,
-	batchNum uint64,
-	sequencerMsg []byte,
-	dasReader DataAvailabilityReader,
-	preimages map[arbutil.PreimageType]map[common.Hash][]byte,
-	keysetValidationMode KeysetValidationMode,
-) ([]byte, error) {
-	var keccakPreimages map[common.Hash][]byte
-	if preimages != nil {
-		if preimages[arbutil.Keccak256PreimageType] == nil {
-			preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte)
-		}
-		keccakPreimages = preimages[arbutil.Keccak256PreimageType]
-	}
-	cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:]))
-	if err != nil {
-		log.Error("Failed to deserialize DAS message", "err", err)
-		return nil, nil
-	}
-	version := cert.Version
-	recordPreimage := func(key common.Hash, value []byte) {
-		keccakPreimages[key] = value
-	}
-
-	if version >= 2 {
-		log.Error("Your node software is probably out of date", "certificateVersion", version)
-		return nil, nil
-	}
-
-	getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) {
-		newHash := hash
-		if version == 0 {
-			newHash = dastree.FlatHashToTreeHash(hash)
-		}
-
-		preimage, err := dasReader.GetByHash(ctx, newHash)
-		if err != nil && hash != newHash {
-			log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err)
-			preimage, err = dasReader.GetByHash(ctx, hash)
-		}
-		if err != nil {
-			return nil, err
-		}
-
-		switch {
-		case version == 0 && crypto.Keccak256Hash(preimage) != hash:
-			fallthrough
-		case version == 1 && dastree.Hash(preimage) != hash:
-			log.Error(
-				"preimage mismatch for hash",
-				"hash", hash, "err", ErrHashMismatch, "version", version,
-			)
-			return nil, ErrHashMismatch
-		}
-		return preimage, nil
-	}
-
-	keysetPreimage, err := getByHash(ctx, cert.KeysetHash)
-	if err != nil {
-		log.Error("Couldn't get keyset", "err", err)
-		return nil, err
-	}
-	if keccakPreimages != nil {
-		dastree.RecordHash(recordPreimage, keysetPreimage)
-	}
-
-	keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), keysetValidationMode == KeysetDontValidate)
-	if err != nil {
-		logLevel := log.Error
-		if keysetValidationMode == KeysetPanicIfInvalid {
-			logLevel = log.Crit
-		}
-		logLevel("Couldn't deserialize keyset", "err", err, "keysetHash", cert.KeysetHash, "batchNum", batchNum)
-		return nil, nil
-	}
-	err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig)
-	if err != nil {
-		log.Error("Bad signature on DAS batch", "err", err)
-		return nil, nil
-	}
-
-	maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16])
-	if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert {
-		log.Error("Data availability cert expires too soon", "err", "")
-		return nil, nil
-	}
-
-	dataHash := cert.DataHash
-	payload, err := getByHash(ctx, dataHash)
-	if err != nil {
-		log.Error("Couldn't fetch DAS batch contents", "err", err)
-		return nil, err
-	}
-
-	if keccakPreimages != nil {
-		if version == 0 {
-			treeLeaf := dastree.FlatHashToTreeLeaf(dataHash)
-			keccakPreimages[dataHash] = payload
-			keccakPreimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf
-		} else {
-			dastree.RecordHash(recordPreimage, payload)
-		}
-	}
-
-	return payload, nil
-}
-
-type DataAvailabilityProvider interface {
-	// IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider
-	IsValidHeaderByte(headerByte byte) bool
-
-	// RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information
-	RecoverPayloadFromBatch(
-		ctx context.Context,
-		batchNum uint64,
-		batchBlockHash common.Hash,
-		sequencerMsg []byte,
-		preimages map[arbutil.PreimageType]map[common.Hash][]byte,
-		keysetValidationMode KeysetValidationMode,
-	) ([]byte, error)
-}
-
-// NewDAProviderDAS is generally meant to be only used by nitro.
-// DA Providers should implement methods in the DataAvailabilityProvider interface independently
-func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS {
-	return &dAProviderForDAS{
-		das: das,
-	}
-}
-
-type dAProviderForDAS struct {
-	das DataAvailabilityReader
-}
-
-func (d *dAProviderForDAS) IsValidHeaderByte(headerByte byte) bool {
-	return IsDASMessageHeaderByte(headerByte)
-}
-
-func (d *dAProviderForDAS) RecoverPayloadFromBatch(
-	ctx context.Context,
-	batchNum uint64,
-	batchBlockHash common.Hash,
-	sequencerMsg []byte,
-	preimages map[arbutil.PreimageType]map[common.Hash][]byte,
-	keysetValidationMode KeysetValidationMode,
-) ([]byte, error) {
-	return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.das, preimages, keysetValidationMode)
-}
-
-// NewDAProviderBlobReader is generally meant to be only used by nitro.
-// DA Providers should implement methods in the DataAvailabilityProvider interface independently
-func NewDAProviderBlobReader(blobReader BlobReader) *dAProviderForBlobReader {
-	return &dAProviderForBlobReader{
-		blobReader: blobReader,
-	}
-}
-
-type dAProviderForBlobReader struct {
-	blobReader BlobReader
-}
-
-func (b *dAProviderForBlobReader) IsValidHeaderByte(headerByte byte) bool {
-	return IsBlobHashesHeaderByte(headerByte)
-}
-
-func (b *dAProviderForBlobReader) RecoverPayloadFromBatch(
-	ctx context.Context,
-	batchNum uint64,
-	batchBlockHash common.Hash,
-	sequencerMsg []byte,
-	preimages map[arbutil.PreimageType]map[common.Hash][]byte,
-	keysetValidationMode KeysetValidationMode,
-) ([]byte, error) {
-	blobHashes := sequencerMsg[41:]
-	if len(blobHashes)%len(common.Hash{}) != 0 {
-		return nil, ErrInvalidBlobDataFormat
-	}
-	versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{}))
-	for i := 0; i*32 < len(blobHashes); i += 1 {
-		copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32])
-	}
-	kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get blobs: %w", err)
-	}
-	payload, err := blobs.DecodeBlobs(kzgBlobs)
-	if err != nil {
-		log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err)
-		return nil, nil
-	}
-	return payload, nil
-}
-
-type KeysetValidationMode uint8
-
-const KeysetValidate KeysetValidationMode = 0
-const KeysetPanicIfInvalid KeysetValidationMode = 1
-const KeysetDontValidate KeysetValidationMode = 2
-
 type inboxMultiplexer struct {
 	backend                   InboxBackend
 	delayedMessagesRead       uint64
-	daProviders               []DataAvailabilityProvider
+	dapReaders                []daprovider.Reader
 	cachedSequencerMessage    *sequencerMessage
 	cachedSequencerMessageNum uint64
 	cachedSegmentNum          uint64
 	cachedSegmentTimestamp    uint64
 	cachedSegmentBlockNumber  uint64
 	cachedSubMessageNumber    uint64
-	keysetValidationMode      KeysetValidationMode
+	keysetValidationMode      daprovider.KeysetValidationMode
 }

-func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer {
+func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer {
 	return &inboxMultiplexer{
 		backend:              backend,
 		delayedMessagesRead:  delayedMessagesRead,
-		daProviders:          daProviders,
+		dapReaders:           dapReaders,
 		keysetValidationMode: keysetValidationMode,
 	}
 }
@@ -409,7 +200,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta
 	}
 	r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition()
 	var err error
-	r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode)
+	r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dapReaders, r.keysetValidationMode)
 	if err != nil {
 		return nil, err
 	}
diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go
index b34c02534b..5ede321810 100644
--- a/arbstate/inbox_fuzz_test.go
+++ b/arbstate/inbox_fuzz_test.go
@@ -11,6 +11,7 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 )

 type multiplexerBackend struct {
@@ -67,7 +68,7 @@ func FuzzInboxMultiplexer(f *testing.F) {
 		delayedMessage:        delayedMsg,
 		positionWithinMessage: 0,
 	}
-	multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate)
+	multiplexer := NewInboxMultiplexer(backend, 0, nil, daprovider.KeysetValidate)
 	_, err := multiplexer.Pop(context.TODO())
 	if err != nil {
 		panic(err)
diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go
index 3e96412648..8036487d26 100644
--- a/cmd/daserver/daserver.go
+++ b/cmd/daserver/daserver.go
@@ -14,8 +14,6 @@ import (
 	"syscall"
 	"time"

-	"golang.org/x/exp/slog"
-
 	koanfjson "github.com/knadh/koanf/parsers/json"
 	flag "github.com/spf13/pflag"

@@ -46,7 +44,7 @@ type DAServerConfig struct {
 	DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"`

 	Conf     genericconf.ConfConfig `koanf:"conf"`
-	LogLevel int                    `koanf:"log-level"`
+	LogLevel string                 `koanf:"log-level"`
 	LogType  string                 `koanf:"log-type"`

 	Metrics bool `koanf:"metrics"`
@@ -66,7 +64,7 @@ var DefaultDAServerConfig = DAServerConfig{
 	RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault,
 	DataAvailability:   das.DefaultDataAvailabilityConfig,
 	Conf:               genericconf.ConfConfigDefault,
-	LogLevel:           int(log.LvlInfo),
+	LogLevel:           "INFO",
 	LogType:            "plaintext",
 	Metrics:            false,
 	MetricsServer:      genericconf.MetricsServerConfigDefault,
@@ -103,7 +101,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) {
 	f.Bool("pprof", DefaultDAServerConfig.PProf, "enable pprof")
 	genericconf.PProfAddOptions("pprof-cfg", f)

-	f.Int("log-level", int(log.LvlInfo), "log level; 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE")
+	f.String("log-level", DefaultDAServerConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE")
 	f.String("log-type", DefaultDAServerConfig.LogType, "log type (plaintext or json)")

 	das.DataAvailabilityConfigAddDaserverOptions("data-availability", f)
@@ -185,13 +183,18 @@ func startup() error {
 		confighelpers.PrintErrorAndExit(errors.New("please specify at least one of --enable-rest or --enable-rpc"), printSampleUsage)
 	}

+	logLevel, err := genericconf.ToSlogLevel(serverConfig.LogLevel)
+	if err != nil {
+		confighelpers.PrintErrorAndExit(err, printSampleUsage)
+	}
+
 	handler, err := genericconf.HandlerFromLogType(serverConfig.LogType, io.Writer(os.Stderr))
 	if err != nil {
 		flag.Usage()
 		return fmt.Errorf("error parsing log type when creating handler: %w", err)
 	}
 	glogger := log.NewGlogHandler(handler)
-	glogger.Verbosity(slog.Level(serverConfig.LogLevel))
+	glogger.Verbosity(logLevel)
 	log.SetDefault(log.NewLogger(glogger))

 	if err := startMetrics(serverConfig); err != nil {
diff --git a/cmd/dataavailability/data_availability_check.go b/cmd/dataavailability/data_availability_check.go
index 72a311a7be..d80c0475bf 100644
--- a/cmd/dataavailability/data_availability_check.go
+++ b/cmd/dataavailability/data_availability_check.go
@@ -21,7 +21,7 @@ import (
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rpc"

-	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/cmd/util/confighelpers"
 	"github.com/offchainlabs/nitro/das"
 	"github.com/offchainlabs/nitro/solgen/go/bridgegen"
@@ -65,7 +65,7 @@ type DataAvailabilityCheck struct {
 	config         *DataAvailabilityCheckConfig
 	inboxAddr      *common.Address
 	inboxContract  *bridgegen.SequencerInbox
-	urlToReaderMap map[string]arbstate.DataAvailabilityReader
+	urlToReaderMap map[string]daprovider.DASReader
 	checkInterval  time.Duration
 }

@@ -86,7 +86,7 @@ func newDataAvailabilityCheck(ctx context.Context, dataAvailabilityCheckConfig *
 	if err != nil {
 		return nil, err
 	}
-	urlToReaderMap := make(map[string]arbstate.DataAvailabilityReader, len(onlineUrls))
+	urlToReaderMap := make(map[string]daprovider.DASReader, len(onlineUrls))
 	for _, url := range onlineUrls {
 		reader, err := das.NewRestfulDasClientFromURL(url)
 		if err != nil {
@@ -238,7 +238,7 @@ func (d *DataAvailabilityCheck) checkDataAvailability(ctx context.Context, deliv
 	if data == nil {
 		return false, nil
 	}
-	cert, err := arbstate.DeserializeDASCertFrom(bytes.NewReader(data))
+	cert, err := daprovider.DeserializeDASCertFrom(bytes.NewReader(data))
 	if err != nil {
 		return true, err
 	}
diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index d78d975fd5..3f64a990ca 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -22,7 +22,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/cmd/genericconf"
 	"github.com/offchainlabs/nitro/cmd/util"
@@ -165,7 +165,7 @@ func startClientStore(args []string) error {
 	}
 	ctx := context.Background()

-	var cert *arbstate.DataAvailabilityCertificate
+	var cert *daprovider.DataAvailabilityCertificate

 	if config.RandomMessageSize > 0 {
 		message := make([]byte, config.RandomMessageSize)
@@ -184,7 +184,7 @@ func startClientStore(args []string) error {
 		return err
 	}

-	serializedCert := das.Serialize(cert)
+	serializedCert := daprovider.Serialize(cert)
 	fmt.Printf("Hex Encoded Cert: %s\n", hexutil.Encode(serializedCert))
 	fmt.Printf("Hex Encoded Data Hash: %s\n", hexutil.Encode(cert.DataHash[:]))

diff --git a/cmd/genericconf/logging.go b/cmd/genericconf/logging.go
index d77071a0bf..fa45953278 100644
--- a/cmd/genericconf/logging.go
+++ b/cmd/genericconf/logging.go
@@ -9,7 +9,6 @@ import (
 	"sync"

 	"github.com/ethereum/go-ethereum/log"
-	"golang.org/x/exp/slog"
 	"gopkg.in/natefinch/lumberjack.v2"
 )

@@ -90,7 +89,7 @@ func (l *fileLoggerFactory) close() error {
 }

 // initLog is not threadsafe
-func InitLog(logType string, logLevel slog.Level, fileLoggingConfig *FileLoggingConfig, pathResolver func(string) string) error {
+func InitLog(logType string, logLevel string, fileLoggingConfig *FileLoggingConfig, pathResolver func(string) string) error {
 	var glogger *log.GlogHandler
 	// always close previous instance of file logger
 	if err := globalFileLoggerFactory.close(); err != nil {
@@ -111,8 +110,14 @@ func InitLog(logType string, logLevel slog.Level, fileLoggingConfig *FileLogging
 		flag.Usage()
 		return fmt.Errorf("error parsing log type when creating handler: %w", err)
 	}
+	slogLevel, err := ToSlogLevel(logLevel)
+	if err != nil {
+		flag.Usage()
+		return fmt.Errorf("error parsing log level: %w", err)
+	}
+
 	glogger = log.NewGlogHandler(handler)
-	glogger.Verbosity(logLevel)
+	glogger.Verbosity(slogLevel)
 	log.SetDefault(log.NewLogger(glogger))
 	return nil
 }
diff --git a/cmd/genericconf/loglevel.go b/cmd/genericconf/loglevel.go
new file mode 100644
index 0000000000..f7ad05a2cc
--- /dev/null
+++ b/cmd/genericconf/loglevel.go
@@ -0,0 +1,38 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package genericconf
+
+import (
+	"errors"
+	"strconv"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/log"
+	"golang.org/x/exp/slog"
+)
+
+func ToSlogLevel(str string) (slog.Level, error) {
+	switch strings.ToLower(str) {
+	case "trace":
+		return log.LevelTrace, nil
+	case "debug":
+		return log.LevelDebug, nil
+	case "info":
+		return log.LevelInfo, nil
+	case "warn":
+		return log.LevelWarn, nil
+	case "error":
+		return log.LevelError, nil
+	case "crit":
+		return log.LevelCrit, nil
+	default:
+		legacyLevel, err := strconv.Atoi(str)
+		if err != nil {
+			// Leave legacy geth numeric log levels undocumented, but if anyone happens
+			// to be using them, it will work.
+			return log.LevelTrace, errors.New("invalid log-level")
+		}
+		return log.FromLegacyLevel(legacyLevel), nil
+	}
+}
+ return log.LevelTrace, errors.New("invalid log-level") + } + return log.FromLegacyLevel(legacyLevel), nil + } +} diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 51d3978836..b52a1c6b5e 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -2,10 +2,10 @@ package main import ( "fmt" + "reflect" "time" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/nat" @@ -20,7 +20,7 @@ import ( type ValidationNodeConfig struct { Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - LogLevel int `koanf:"log-level" reload:"hot"` + LogLevel string `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` Persistent conf.PersistentConfig `koanf:"persistent"` @@ -61,7 +61,7 @@ var IPCConfigDefault = genericconf.IPCConfig{ var ValidationNodeConfigDefault = ValidationNodeConfig{ Conf: genericconf.ConfConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Persistent: conf.PersistentConfigDefault, HTTP: HTTPConfigDefault, @@ -79,7 +79,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ func ValidationNodeConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) valnode.ValidationConfigAddOptions("validation", f) - f.Int("log-level", ValidationNodeConfigDefault.LogLevel, "log level") + f.String("log-level", ValidationNodeConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", ValidationNodeConfigDefault.LogType, "log type (plaintext or json)") genericconf.FileLoggingConfigAddOptions("file-logging", f) conf.PersistentConfigAddOptions("persistent", f) diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 4e543f7953..1e894336ea 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -22,7 +22,6 @@ import ( "github.com/offchainlabs/nitro/cmd/util/confighelpers" _ "github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/validator/valnode" - "golang.org/x/exp/slog" ) func printSampleUsage(name string) { @@ -90,7 +89,7 @@ func mainImpl() int { } } - err = genericconf.InitLog(nodeConfig.LogType, slog.Level(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + err = genericconf.InitLog(nodeConfig.LogType, nodeConfig.LogLevel, &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 @@ -109,7 +108,7 @@ func mainImpl() int { liveNodeConfig := genericconf.NewLiveConfig[*ValidationNodeConfig](args, nodeConfig, ParseNode) liveNodeConfig.SetOnReloadHook(func(oldCfg *ValidationNodeConfig, newCfg *ValidationNodeConfig) error { - return genericconf.InitLog(newCfg.LogType, slog.Level(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + return genericconf.InitLog(newCfg.LogType, newCfg.LogLevel, &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) }) valnode.EnsureValidationExposedViaAuthRPC(&stackConf) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index df0feca8ee..9280c3af02 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -42,7 +42,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/resourcemanager" - 
"github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -63,7 +63,6 @@ import ( "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" - "golang.org/x/exp/slog" ) func printSampleUsage(name string) { @@ -208,7 +207,7 @@ func mainImpl() int { } stackConf.JWTSecret = filename } - err = genericconf.InitLog(nodeConfig.LogType, slog.Level(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + err = genericconf.InitLog(nodeConfig.LogType, nodeConfig.LogLevel, &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 @@ -325,7 +324,7 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client var l1Reader *headerreader.HeaderReader - var blobReader arbstate.BlobReader + var blobReader daprovider.BlobReader if nodeConfig.Node.ParentChainReader.Enable { confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) @@ -600,7 +599,7 @@ func mainImpl() int { } liveNodeConfig.SetOnReloadHook(func(oldCfg *NodeConfig, newCfg *NodeConfig) error { - if err := genericconf.InitLog(newCfg.LogType, slog.Level(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { + if err := genericconf.InitLog(newCfg.LogType, newCfg.LogLevel, &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { return fmt.Errorf("failed to re-init logging: %w", err) } return currentNode.OnConfigReload(&oldCfg.Node, &newCfg.Node) @@ -691,7 +690,7 @@ type NodeConfig struct { Validation valnode.Config `koanf:"validation" reload:"hot"` ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` Chain conf.L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level" reload:"hot"` + LogLevel string `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` Persistent conf.PersistentConfig `koanf:"persistent"` @@ -717,7 +716,7 @@ var NodeConfigDefault = NodeConfig{ Validation: valnode.DefaultValidationConfig, ParentChain: conf.L1ConfigDefault, Chain: conf.L2ConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", FileLogging: genericconf.DefaultFileLoggingConfig, Persistent: conf.PersistentConfigDefault, @@ -743,7 +742,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { valnode.ValidationConfigAddOptions("validation", f) conf.L1ConfigAddOptions("parent-chain", f) conf.L2ConfigAddOptions("chain", f) - f.Int("log-level", NodeConfigDefault.LogLevel, "log level") + f.String("log-level", NodeConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", NodeConfigDefault.LogType, "log type (plaintext or json)") genericconf.FileLoggingConfigAddOptions("file-logging", f) conf.PersistentConfigAddOptions("persistent", f) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index c483526aa1..3ef888e897 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb 
 		return nil, fmt.Errorf("failed to get finalized block: %w", err)
 	}
 	l1BlockNum := l1Block.NumberU64()
-	tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, nil)
+	tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil)
 	if err != nil {
 		return nil, err
 	}
diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go
index 5a7499e691..6f786f976a 100644
--- a/cmd/relay/relay.go
+++ b/cmd/relay/relay.go
@@ -20,7 +20,6 @@ import (
 	"github.com/offchainlabs/nitro/cmd/genericconf"
 	"github.com/offchainlabs/nitro/cmd/util/confighelpers"
 	"github.com/offchainlabs/nitro/relay"
-	"golang.org/x/exp/slog"
 )

 func main() {
@@ -69,8 +68,12 @@ func startup() error {
 		flag.Usage()
 		return fmt.Errorf("error parsing log type when creating handler: %w", err)
 	}
+	logLevel, err := genericconf.ToSlogLevel(relayConfig.LogLevel)
+	if err != nil {
+		confighelpers.PrintErrorAndExit(err, printSampleUsage)
+	}
 	glogger := log.NewGlogHandler(handler)
-	glogger.Verbosity(slog.Level(relayConfig.LogLevel))
+	glogger.Verbosity(logLevel)
 	log.SetDefault(log.NewLogger(glogger))

 	vcsRevision, _, vcsTime := confighelpers.GetVersion()
diff --git a/cmd/replay/main.go b/cmd/replay/main.go
index 3348d0b431..71ea6760ed 100644
--- a/cmd/replay/main.go
+++ b/cmd/replay/main.go
@@ -28,6 +28,7 @@ import (
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
 	"github.com/offchainlabs/nitro/arbos/burn"
 	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/cmd/chaininfo"
 	"github.com/offchainlabs/nitro/das/dastree"
@@ -116,8 +117,8 @@ func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error {
 	return nil
 }

-func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) {
-	return arbstate.DiscardImmediately, nil
+func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) {
+	return daprovider.DiscardImmediately, nil
 }

 type BlobPreimageReader struct {
@@ -203,21 +204,21 @@ func main() {
 		if lastBlockHeader != nil {
 			delayedMessagesRead = lastBlockHeader.Nonce.Uint64()
 		}
-		var dasReader arbstate.DataAvailabilityReader
+		var dasReader daprovider.DASReader
 		if dasEnabled {
 			dasReader = &PreimageDASReader{}
 		}
 		backend := WavmInbox{}
-		var keysetValidationMode = arbstate.KeysetPanicIfInvalid
+		var keysetValidationMode = daprovider.KeysetPanicIfInvalid
 		if backend.GetPositionWithinMessage() > 0 {
-			keysetValidationMode = arbstate.KeysetDontValidate
+			keysetValidationMode = daprovider.KeysetDontValidate
 		}
-		var daProviders []arbstate.DataAvailabilityProvider
+		var dapReaders []daprovider.Reader
 		if dasReader != nil {
-			daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader))
+			dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader))
 		}
-		daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{}))
-		inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode)
+		dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{}))
+		inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode)
 		ctx := context.Background()
 		message, err := inboxMultiplexer.Pop(ctx)
 		if err != nil {
diff --git a/das/aggregator.go b/das/aggregator.go
index 4b4571eb43..d3edd58437 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -17,7 +17,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" @@ -37,8 +37,6 @@ var DefaultAggregatorConfig = AggregatorConfig{ Backends: "", } -var BatchToDasFailed = errors.New("unable to batch to DAS") - func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") @@ -164,7 +162,7 @@ type storeResponse struct { // constructed, calls to Store(...) will try to verify the passed-in data's signature // is from the batch poster. If the contract details are not provided, then the // signature is not checked, which is useful for testing. -func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig)) if a.addrVerifier != nil { actualSigner, err := DasRecoverSigner(message, timeout, sig) @@ -243,7 +241,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, }(ctx, d) } - var aggCert arbstate.DataAvailabilityCertificate + var aggCert daprovider.DataAvailabilityCertificate type certDetails struct { pubKeys []blsSignatures.PublicKey @@ -296,7 +294,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, } } else if storeFailures > a.maxAllowedServiceStoreFailures { cd := certDetails{} - cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, BatchToDasFailed) + cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed) certDetailsChan <- cd returned = true } @@ -323,10 +321,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey) if err != nil { //nolint:errorlint - return nil, fmt.Errorf("%s. %w", err.Error(), BatchToDasFailed) + return nil, fmt.Errorf("%s. %w", err.Error(), daprovider.ErrBatchToDasFailed) } if !verified { - return nil, fmt.Errorf("failed aggregate signature check. %w", BatchToDasFailed) + return nil, fmt.Errorf("failed aggregate signature check. 
%w", daprovider.ErrBatchToDasFailed) } return &aggCert, nil } diff --git a/das/aggregator_test.go b/das/aggregator_test.go index ef8ef5327a..728db6cf50 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -16,10 +16,10 @@ import ( "testing" "time" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" ) func TestDAS_BasicAggregationLocal(t *testing.T) { @@ -123,7 +123,7 @@ type WrapStore struct { DataAvailabilityServiceWriter } -func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { switch w.injector.shouldFail() { case success: return w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) diff --git a/das/cache_storage_service.go b/das/cache_storage_service.go index 13bdb189d3..439ccda086 100644 --- a/das/cache_storage_service.go +++ b/das/cache_storage_service.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" @@ -82,7 +82,7 @@ func (c *CacheStorageService) Close(ctx context.Context) error { return c.baseStorageService.Close(ctx) } -func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *CacheStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return c.baseStorageService.ExpirationPolicy(ctx) } diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index bc8ab5bc19..99311decaa 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -8,7 +8,7 @@ import ( "errors" "sync" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -38,13 +38,13 @@ func (c *syncedKeysetCache) put(key [32]byte, value []byte) { } type ChainFetchReader struct { - arbstate.DataAvailabilityReader + daprovider.DASReader seqInboxCaller *bridgegen.SequencerInboxCaller seqInboxFilterer *bridgegen.SequencerInboxFilterer keysetCache syncedKeysetCache } -func NewChainFetchReader(inner arbstate.DataAvailabilityReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { +func NewChainFetchReader(inner daprovider.DASReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, l1client) if err != nil { return nil, err @@ -53,18 +53,18 @@ func NewChainFetchReader(inner arbstate.DataAvailabilityReader, l1client arbutil return NewChainFetchReaderWithSeqInbox(inner, seqInbox) } -func NewChainFetchReaderWithSeqInbox(inner arbstate.DataAvailabilityReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { +func NewChainFetchReaderWithSeqInbox(inner daprovider.DASReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { return &ChainFetchReader{ - DataAvailabilityReader: inner, - seqInboxCaller: &seqInbox.SequencerInboxCaller, - seqInboxFilterer: &seqInbox.SequencerInboxFilterer, - keysetCache: syncedKeysetCache{cache: make(map[[32]byte][]byte)}, + 
DASReader: inner, + seqInboxCaller: &seqInbox.SequencerInboxCaller, + seqInboxFilterer: &seqInbox.SequencerInboxFilterer, + keysetCache: syncedKeysetCache{cache: make(map[[32]byte][]byte)}, }, nil } func (c *ChainFetchReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.PrettyHash(hash)) - return chainFetchGetByHash(ctx, c.DataAvailabilityReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) + return chainFetchGetByHash(ctx, c.DASReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) } func (c *ChainFetchReader) String() string { return "ChainFetchReader" @@ -72,7 +72,7 @@ func (c *ChainFetchReader) String() string { func chainFetchGetByHash( ctx context.Context, - daReader arbstate.DataAvailabilityReader, + daReader daprovider.DASReader, cache *syncedKeysetCache, seqInboxCaller *bridgegen.SequencerInboxCaller, seqInboxFilterer *bridgegen.SequencerInboxFilterer, diff --git a/das/das.go b/das/das.go index dd8e43a34d..b0708e3b33 100644 --- a/das/das.go +++ b/das/das.go @@ -5,7 +5,6 @@ package das import ( "context" - "encoding/binary" "errors" "fmt" "math" @@ -16,18 +15,17 @@ import ( "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type DataAvailabilityServiceWriter interface { // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). - Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) + Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) fmt.Stringer } type DataAvailabilityServiceReader interface { - arbstate.DataAvailabilityReader + daprovider.DASReader fmt.Stringer } @@ -138,25 +136,6 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "parent chain address of SequencerInbox contract") } -func Serialize(c *arbstate.DataAvailabilityCertificate) []byte { - - flags := arbstate.DASMessageHeaderFlag - if c.Version != 0 { - flags |= arbstate.TreeDASMessageHeaderFlag - } - - buf := make([]byte, 0) - buf = append(buf, flags) - buf = append(buf, c.KeysetHash[:]...) - buf = append(buf, c.SerializeSignableFields()...) - - var intData [8]byte - binary.BigEndian.PutUint64(intData[:], c.SignersMask) - buf = append(buf, intData[:]...) - - return append(buf, blsSignatures.SignatureToBytes(c.Sig)...) 
-} - func GetL1Client(ctx context.Context, maxConnectionAttempts int, l1URL string) (*ethclient.Client, error) { if maxConnectionAttempts <= 0 { maxConnectionAttempts = math.MaxInt diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 54d8eba94c..5fca1e449f 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/util/pretty" ) @@ -34,7 +34,7 @@ func NewDASRPCClient(target string) (*DASRPCClient, error) { }, nil } -func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(reqSig), "this", *c) var ret StoreResult if err := c.clnt.CallContext(ctx, &ret, "das_store", hexutil.Bytes(message), hexutil.Uint64(timeout), hexutil.Bytes(reqSig)); err != nil { @@ -44,7 +44,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 if err != nil { return nil, err } - return &arbstate.DataAvailabilityCertificate{ + return &daprovider.DataAvailabilityCertificate{ DataHash: common.BytesToHash(ret.DataHash), Timeout: uint64(ret.Timeout), SignersMask: uint64(ret.SignersMask), @@ -62,11 +62,11 @@ func (c *DASRPCClient) HealthCheck(ctx context.Context) error { return c.clnt.CallContext(ctx, nil, "das_healthCheck") } -func (c *DASRPCClient) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *DASRPCClient) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { var res string err := c.clnt.CallContext(ctx, &res, "das_expirationPolicy") if err != nil { return -1, err } - return arbstate.StringToExpirationPolicy(res) + return daprovider.StringToExpirationPolicy(res) } diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go index bc325a3200..d873f0568d 100644 --- a/das/dastree/dastree.go +++ b/das/dastree/dastree.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -26,7 +27,7 @@ type node struct { // RecordHash chunks the preimage into 64kB bins and generates a recursive hash tree, // calling the caller-supplied record function for each hash/preimage pair created in // building the tree structure. -func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { +func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...[]byte) bytes32 { // Algorithm // 1. split the preimage into 64kB bins and double hash them to produce the tree's leaves // 2. 
repeatedly hash pairs and their combined length, bubbling up any odd-one's out, to form the root @@ -48,7 +49,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { keccord := func(value []byte) bytes32 { hash := crypto.Keccak256Hash(value) - record(hash, value) + record(hash, value, arbutil.Keccak256PreimageType) return hash } prepend := func(before byte, slice []byte) []byte { @@ -94,7 +95,7 @@ func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 { func Hash(preimage ...[]byte) bytes32 { // Merkelizes without recording anything. All but the validator's DAS will call this - return RecordHash(func(bytes32, []byte) {}, preimage...) + return RecordHash(func(bytes32, []byte, arbutil.PreimageType) {}, preimage...) } func HashBytes(preimage ...[]byte) []byte { diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index 33f729f4f3..4d24c9ae98 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/testhelpers" @@ -25,7 +26,7 @@ func TestDASTree(t *testing.T) { tests = append(tests, large) } - record := func(key bytes32, value []byte) { + record := func(key bytes32, value []byte, ty arbutil.PreimageType) { colors.PrintGrey("storing ", key, " ", pretty.PrettyBytes(value)) store[key] = value if crypto.Keccak256Hash(value) != key { diff --git a/das/db_storage_service.go b/das/db_storage_service.go index 33d21942b2..5596ff378e 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -12,7 +12,7 @@ import ( badger "github.com/dgraph-io/badger/v4" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -173,11 +173,11 @@ func (dbs *DBStorageService) Close(ctx context.Context) error { return dbs.stopWaiter.StopAndWait() } -func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { if dbs.discardAfterTimeout { - return arbstate.DiscardAfterDataTimeout, nil + return daprovider.DiscardAfterDataTimeout, nil } - return arbstate.KeepForever, nil + return daprovider.KeepForever, nil } func (dbs *DBStorageService) String() string { diff --git a/das/extra_signature_checker_test.go b/das/extra_signature_checker_test.go index 88a0969229..2fcfac167d 100644 --- a/das/extra_signature_checker_test.go +++ b/das/extra_signature_checker_test.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/signature" ) @@ -22,7 +22,7 @@ type StubSignatureCheckDAS struct { keyDir string } -func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { pubkeyEncoded, 
err := ioutil.ReadFile(s.keyDir + "/ecdsa.pub") if err != nil { return nil, err @@ -39,8 +39,8 @@ func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeo return nil, nil } -func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *StubSignatureCheckDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index a78b4104e8..49f961da60 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/pretty" @@ -18,7 +18,7 @@ import ( type FallbackStorageService struct { StorageService - backup arbstate.DataAvailabilityReader + backup daprovider.DASReader backupHealthChecker DataAvailabilityServiceHealthChecker backupRetentionSeconds uint64 ignoreRetentionWriteErrors bool @@ -32,7 +32,7 @@ type FallbackStorageService struct { // a successful GetByHash result from the backup is Put into the primary. func NewFallbackStorageService( primary StorageService, - backup arbstate.DataAvailabilityReader, + backup daprovider.DASReader, backupHealthChecker DataAvailabilityServiceHealthChecker, backupRetentionSeconds uint64, // how long to retain data that we copy in from the backup (MaxUint64 means forever) ignoreRetentionWriteErrors bool, // if true, don't return error if write of retention data to primary fails diff --git a/das/ipfs_storage_service.bkup_go b/das/ipfs_storage_service.bkup_go index 11d435affb..43b06fd4b6 100644 --- a/das/ipfs_storage_service.bkup_go +++ b/das/ipfs_storage_service.bkup_go @@ -25,7 +25,8 @@ import ( "github.com/ipfs/interface-go-ipfs-core/options" "github.com/ipfs/interface-go-ipfs-core/path" "github.com/multiformats/go-multihash" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -183,7 +184,7 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 var chunks [][]byte - record := func(_ common.Hash, value []byte) { + record := func(_ common.Hash, value []byte, ty arbutil.PreimageType) { chunks = append(chunks, value) } @@ -222,8 +223,8 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 panic("unreachable") } -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *IpfsStorageService) Sync(ctx context.Context) error { diff --git a/das/ipfs_storage_service_stub.go b/das/ipfs_storage_service_stub.go index db434d5bf3..5814f2c7e4 100644 --- a/das/ipfs_storage_service_stub.go +++ b/das/ipfs_storage_service_stub.go @@ -14,7 +14,7 @@ import ( "errors" 
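// The mechanical rename running through these das/ files is
// arbstate.DataAvailabilityReader -> daprovider.DASReader. Judging by the call
// sites in this diff, the method set is unchanged, so a hypothetical in-memory
// reader would still only need the following (mapDASReader is illustrative,
// not code from this PR):
//
//	type mapDASReader struct{ data map[common.Hash][]byte }
//
//	func (r *mapDASReader) GetByHash(ctx context.Context, h common.Hash) ([]byte, error) {
//		if d, ok := r.data[h]; ok {
//			return d, nil
//		}
//		return nil, das.ErrNotFound
//	}
//
//	func (r *mapDASReader) HealthCheck(ctx context.Context) error { return nil }
//
//	func (r *mapDASReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) {
//		return daprovider.KeepForever, nil
//	}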
"github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" flag "github.com/spf13/pflag" ) @@ -47,8 +47,8 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 return ErrIpfsNotSupported } -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, ErrIpfsNotSupported +func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, ErrIpfsNotSupported } func (s *IpfsStorageService) Sync(ctx context.Context) error { diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 5fa5306e39..4ebb1d56d9 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" @@ -130,8 +130,8 @@ func (s *LocalFileStorageService) Close(ctx context.Context) error { return nil } -func (s *LocalFileStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *LocalFileStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *LocalFileStorageService) String() string { diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 6484231479..91f7d9a2f5 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" ) @@ -79,8 +79,8 @@ func (m *MemoryBackedStorageService) Close(ctx context.Context) error { return nil } -func (m *MemoryBackedStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (m *MemoryBackedStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (m *MemoryBackedStorageService) String() string { diff --git a/das/panic_wrapper.go b/das/panic_wrapper.go index 7a15f6bec0..dbb61cba96 100644 --- a/das/panic_wrapper.go +++ b/das/panic_wrapper.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type WriterPanicWrapper struct { @@ -26,7 +26,7 @@ func (w *WriterPanicWrapper) String() string { return fmt.Sprintf("WriterPanicWrapper{%v}", w.DataAvailabilityServiceWriter) } -func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) if err != nil { panic(fmt.Sprintf("panic wrapper Store: %v", err)) diff 
--git a/das/read_limited.go b/das/read_limited.go index 74d6d5358d..5ef0335d5f 100644 --- a/das/read_limited.go +++ b/das/read_limited.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) // These classes are wrappers implementing das.StorageService and das.DataAvailabilityService. @@ -16,12 +16,12 @@ import ( // it is a programming error in the code setting up the node or daserver if a non-writeable object // is used in a writeable context. -func NewReadLimitedStorageService(reader arbstate.DataAvailabilityReader) *readLimitedStorageService { +func NewReadLimitedStorageService(reader daprovider.DASReader) *readLimitedStorageService { return &readLimitedStorageService{reader} } type readLimitedStorageService struct { - arbstate.DataAvailabilityReader + daprovider.DASReader } func (s *readLimitedStorageService) Put(ctx context.Context, data []byte, expiration uint64) error { @@ -37,22 +37,22 @@ func (s *readLimitedStorageService) Close(ctx context.Context) error { } func (s *readLimitedStorageService) String() string { - return fmt.Sprintf("readLimitedStorageService(%v)", s.DataAvailabilityReader) + return fmt.Sprintf("readLimitedStorageService(%v)", s.DASReader) } type readLimitedDataAvailabilityService struct { - arbstate.DataAvailabilityReader + daprovider.DASReader } -func NewReadLimitedDataAvailabilityService(da arbstate.DataAvailabilityReader) *readLimitedDataAvailabilityService { +func NewReadLimitedDataAvailabilityService(da daprovider.DASReader) *readLimitedDataAvailabilityService { return &readLimitedDataAvailabilityService{da} } -func (*readLimitedDataAvailabilityService) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (*readLimitedDataAvailabilityService) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { panic("Logic error: readLimitedDataAvailabilityService.Store shouldn't be called.") } func (s *readLimitedDataAvailabilityService) String() string { - return fmt.Sprintf("ReadLimitedDataAvailabilityService(%v)", s.DataAvailabilityReader) + return fmt.Sprintf("ReadLimitedDataAvailabilityService(%v)", s.DASReader) } diff --git a/das/reader_aggregator_strategies.go b/das/reader_aggregator_strategies.go index 855be5e318..d20760bd5b 100644 --- a/das/reader_aggregator_strategies.go +++ b/das/reader_aggregator_strategies.go @@ -10,30 +10,30 @@ import ( "sync" "sync/atomic" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) var ErrNoReadersResponded = errors.New("no DAS readers responded successfully") type aggregatorStrategy interface { newInstance() aggregatorStrategyInstance - update([]arbstate.DataAvailabilityReader, map[arbstate.DataAvailabilityReader]readerStats) + update([]daprovider.DASReader, map[daprovider.DASReader]readerStats) } type abstractAggregatorStrategy struct { sync.RWMutex - readers []arbstate.DataAvailabilityReader - stats map[arbstate.DataAvailabilityReader]readerStats + readers []daprovider.DASReader + stats map[daprovider.DASReader]readerStats } -func (s *abstractAggregatorStrategy) update(readers []arbstate.DataAvailabilityReader, stats map[arbstate.DataAvailabilityReader]readerStats) { +func (s *abstractAggregatorStrategy) update(readers []daprovider.DASReader, stats map[daprovider.DASReader]readerStats) { s.Lock() defer s.Unlock() - s.readers = 
make([]arbstate.DataAvailabilityReader, len(readers)) + s.readers = make([]daprovider.DASReader, len(readers)) copy(s.readers, readers) - s.stats = make(map[arbstate.DataAvailabilityReader]readerStats) + s.stats = make(map[daprovider.DASReader]readerStats) for k, v := range stats { s.stats[k] = v } @@ -51,11 +51,11 @@ type simpleExploreExploitStrategy struct { func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance { iterations := atomic.AddUint32(&s.iterations, 1) - readerSets := make([][]arbstate.DataAvailabilityReader, 0) + readerSets := make([][]daprovider.DASReader, 0) s.RLock() defer s.RUnlock() - readers := make([]arbstate.DataAvailabilityReader, len(s.readers)) + readers := make([]daprovider.DASReader, len(s.readers)) copy(readers, s.readers) if iterations%(s.exploreIterations+s.exploitIterations) < s.exploreIterations { @@ -70,7 +70,7 @@ func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance } for i, maxTake := 0, 1; i < len(readers); maxTake = maxTake * 2 { - readerSet := make([]arbstate.DataAvailabilityReader, 0, maxTake) + readerSet := make([]daprovider.DASReader, 0, maxTake) for taken := 0; taken < maxTake && i < len(readers); i, taken = i+1, taken+1 { readerSet = append(readerSet, readers[i]) } @@ -91,7 +91,7 @@ func (s *testingSequentialStrategy) newInstance() aggregatorStrategyInstance { si := basicStrategyInstance{} for _, reader := range s.readers { - si.readerSets = append(si.readerSets, []arbstate.DataAvailabilityReader{reader}) + si.readerSets = append(si.readerSets, []daprovider.DASReader{reader}) } return &si @@ -99,14 +99,14 @@ func (s *testingSequentialStrategy) newInstance() aggregatorStrategyInstance { // Instance of a strategy that returns readers in an order according to the strategy type aggregatorStrategyInstance interface { - nextReaders() []arbstate.DataAvailabilityReader + nextReaders() []daprovider.DASReader } type basicStrategyInstance struct { - readerSets [][]arbstate.DataAvailabilityReader + readerSets [][]daprovider.DASReader } -func (si *basicStrategyInstance) nextReaders() []arbstate.DataAvailabilityReader { +func (si *basicStrategyInstance) nextReaders() []daprovider.DASReader { if len(si.readerSets) == 0 { return nil } diff --git a/das/reader_aggregator_strategies_test.go b/das/reader_aggregator_strategies_test.go index 987bc08938..cdb85b25e9 100644 --- a/das/reader_aggregator_strategies_test.go +++ b/das/reader_aggregator_strategies_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type dummyReader struct { @@ -26,13 +26,13 @@ func (*dummyReader) HealthCheck(context.Context) error { return errors.New("not implemented") } -func (*dummyReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (*dummyReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return -1, errors.New("not implemented") } func TestDAS_SimpleExploreExploit(t *testing.T) { - readers := []arbstate.DataAvailabilityReader{&dummyReader{0}, &dummyReader{1}, &dummyReader{2}, &dummyReader{3}, &dummyReader{4}, &dummyReader{5}} - stats := make(map[arbstate.DataAvailabilityReader]readerStats) + readers := []daprovider.DASReader{&dummyReader{0}, &dummyReader{1}, &dummyReader{2}, &dummyReader{3}, &dummyReader{4}, &dummyReader{5}} + stats := make(map[daprovider.DASReader]readerStats) stats[readers[0]] = []readerStat{ // weighted avg 10s {10 * 
time.Second, true}, } @@ -57,7 +57,7 @@ func TestDAS_SimpleExploreExploit(t *testing.T) { {8 * time.Second, true}, } - expectedOrdering := []arbstate.DataAvailabilityReader{readers[1], readers[2], readers[5], readers[4], readers[0], readers[3]} + expectedOrdering := []daprovider.DASReader{readers[1], readers[2], readers[5], readers[4], readers[0], readers[3]} expectedExploreIterations, expectedExploitIterations := uint32(5), uint32(5) strategy := simpleExploreExploitStrategy{ @@ -66,7 +66,7 @@ func TestDAS_SimpleExploreExploit(t *testing.T) { } strategy.update(readers, stats) - checkMatch := func(expected, was []arbstate.DataAvailabilityReader, doMatch bool) { + checkMatch := func(expected, was []daprovider.DASReader, doMatch bool) { if len(expected) != len(was) { Fail(t, fmt.Sprintf("Incorrect number of nextReaders %d, expected %d", len(was), len(expected))) } diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index 3449a8e78c..dbd85921ed 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -13,7 +13,7 @@ import ( "golang.org/x/crypto/sha3" "github.com/go-redis/redis/v8" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/redisutil" @@ -162,7 +162,7 @@ func (rs *RedisStorageService) Close(ctx context.Context) error { return rs.baseStorageService.Close(ctx) } -func (rs *RedisStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (rs *RedisStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return rs.baseStorageService.ExpirationPolicy(ctx) } diff --git a/das/redundant_storage_service.go b/das/redundant_storage_service.go index 74d32bd819..3158d28076 100644 --- a/das/redundant_storage_service.go +++ b/das/redundant_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" ) @@ -121,7 +121,7 @@ func (r *RedundantStorageService) Close(ctx context.Context) error { return anyError } -func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { // If at least one inner service has KeepForever, // then whole redundant service can serve after timeout. @@ -132,20 +132,20 @@ func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (arbstat // If no inner service has KeepForever, DiscardAfterArchiveTimeout, // but at least one inner service has DiscardAfterDataTimeout, // then whole redundant service can serve till data timeout. 
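// Worked examples of the precedence implemented below, for clarity (inner
// service policies on the left, aggregate policy on the right):
//
//	{DiscardAfterDataTimeout, KeepForever}                -> KeepForever
//	{DiscardAfterDataTimeout, DiscardAfterArchiveTimeout} -> DiscardAfterArchiveTimeout
//	{DiscardAfterDataTimeout, DiscardAfterDataTimeout}    -> DiscardAfterDataTimeout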
- var res arbstate.ExpirationPolicy = -1 + var res daprovider.ExpirationPolicy = -1 for _, serv := range r.innerServices { expirationPolicy, err := serv.ExpirationPolicy(ctx) if err != nil { return -1, err } switch expirationPolicy { - case arbstate.KeepForever: - return arbstate.KeepForever, nil - case arbstate.DiscardAfterArchiveTimeout: - res = arbstate.DiscardAfterArchiveTimeout - case arbstate.DiscardAfterDataTimeout: - if res != arbstate.DiscardAfterArchiveTimeout { - res = arbstate.DiscardAfterDataTimeout + case daprovider.KeepForever: + return daprovider.KeepForever, nil + case daprovider.DiscardAfterArchiveTimeout: + res = daprovider.DiscardAfterArchiveTimeout + case daprovider.DiscardAfterDataTimeout: + if res != daprovider.DiscardAfterArchiveTimeout { + res = daprovider.DiscardAfterDataTimeout } } } diff --git a/das/restful_client.go b/das/restful_client.go index 7d757c6bb8..b65426e7cd 100644 --- a/das/restful_client.go +++ b/das/restful_client.go @@ -14,11 +14,11 @@ import ( "strings" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" ) -// RestfulDasClient implements DataAvailabilityReader +// RestfulDasClient implements daprovider.DASReader type RestfulDasClient struct { url string } @@ -65,7 +65,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash common.Hash) ([]b return nil, err } if !dastree.ValidHash(hash, decodedBytes) { - return nil, arbstate.ErrHashMismatch + return nil, daprovider.ErrHashMismatch } return decodedBytes, nil @@ -82,7 +82,7 @@ func (c *RestfulDasClient) HealthCheck(ctx context.Context) error { return nil } -func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { res, err := http.Get(c.url + expirationPolicyRequestPath) if err != nil { return -1, err @@ -101,5 +101,5 @@ func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (arbstate.Expir return -1, err } - return arbstate.StringToExpirationPolicy(response.ExpirationPolicy) + return daprovider.StringToExpirationPolicy(response.ExpirationPolicy) } diff --git a/das/restful_server.go b/das/restful_server.go index 5c5e82e820..b1607729e2 100644 --- a/das/restful_server.go +++ b/das/restful_server.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/util/pretty" ) @@ -32,13 +32,13 @@ var ( type RestfulDasServer struct { server *http.Server - daReader arbstate.DataAvailabilityReader + daReader daprovider.DASReader daHealthChecker DataAvailabilityServiceHealthChecker httpServerExitedChan chan interface{} httpServerError error } -func NewRestfulDasServer(address string, port uint64, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader arbstate.DataAvailabilityReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { +func NewRestfulDasServer(address string, port uint64, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader daprovider.DASReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", 
address, port)) if err != nil { return nil, err @@ -46,7 +46,7 @@ func NewRestfulDasServer(address string, port uint64, restServerTimeouts generic return NewRestfulDasServerOnListener(listener, restServerTimeouts, daReader, daHealthChecker) } -func NewRestfulDasServerOnListener(listener net.Listener, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader arbstate.DataAvailabilityReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { +func NewRestfulDasServerOnListener(listener net.Listener, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader daprovider.DASReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { ret := &RestfulDasServer{ daReader: daReader, diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 134c4229c8..490116a89a 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -12,7 +12,7 @@ import ( "math/bits" "net/url" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" @@ -102,7 +102,7 @@ func KeysetHashFromServices(services []ServiceDetails, assumedHonest uint64) ([3 return [32]byte{}, nil, errors.New("at least two signers share a mask") } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: uint64(assumedHonest), PubKeys: pubKeys, } diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 1a3ae94114..b5150fb8ed 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -15,7 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -145,11 +145,11 @@ func (s3s *S3StorageService) Close(ctx context.Context) error { return nil } -func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { if s3s.discardAfterTimeout { - return arbstate.DiscardAfterDataTimeout, nil + return daprovider.DiscardAfterDataTimeout, nil } - return arbstate.KeepForever, nil + return daprovider.KeepForever, nil } func (s3s *S3StorageService) String() string { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 50c4ee9aee..36c51c022e 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -18,7 +18,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -123,7 +123,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( return nil, err } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: 1, PubKeys: []blsSignatures.PublicKey{publicKey}, } @@ -180,7 +180,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( func (d *SignAfterStoreDASWriter) Store( ctx context.Context, message []byte, 
timeout uint64, sig []byte, -) (c *arbstate.DataAvailabilityCertificate, err error) { +) (c *daprovider.DataAvailabilityCertificate, err error) { log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", d) var verified bool if d.extraBpVerifier != nil { @@ -201,7 +201,7 @@ func (d *SignAfterStoreDASWriter) Store( } } - c = &arbstate.DataAvailabilityCertificate{ + c = &daprovider.DataAvailabilityCertificate{ Timeout: timeout, DataHash: dastree.Hash(message), Version: 1, diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index eb82a33837..dc6147a7e4 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -80,7 +80,7 @@ func SimpleExploreExploitStrategyConfigAddOptions(prefix string, f *flag.FlagSet func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggregatorConfig) (*SimpleDASReaderAggregator, error) { a := SimpleDASReaderAggregator{ config: config, - stats: make(map[arbstate.DataAvailabilityReader]readerStats), + stats: make(map[daprovider.DASReader]readerStats), } combinedUrls := make(map[string]bool) @@ -160,7 +160,7 @@ type readerStat struct { type readerStatMessage struct { readerStat - reader arbstate.DataAvailabilityReader + reader daprovider.DASReader } type SimpleDASReaderAggregator struct { @@ -170,8 +170,8 @@ type SimpleDASReaderAggregator struct { readersMutex sync.RWMutex // readers and stats are only to be updated by the stats goroutine - readers []arbstate.DataAvailabilityReader - stats map[arbstate.DataAvailabilityReader]readerStats + readers []daprovider.DASReader + stats map[daprovider.DASReader]readerStats strategy aggregatorStrategy @@ -199,7 +199,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.H waitChan := make(chan interface{}) for _, reader := range readers { wg.Add(1) - go func(reader arbstate.DataAvailabilityReader) { + go func(reader daprovider.DASReader) { defer wg.Done() data, err := a.tryGetByHash(subCtx, hash, reader) if err != nil && errors.Is(ctx.Err(), context.Canceled) { @@ -243,7 +243,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.H } func (a *SimpleDASReaderAggregator) tryGetByHash( - ctx context.Context, hash common.Hash, reader arbstate.DataAvailabilityReader, + ctx context.Context, hash common.Hash, reader daprovider.DASReader, ) ([]byte, error) { stat := readerStatMessage{reader: reader} stat.success = false @@ -278,7 +278,7 @@ func (a *SimpleDASReaderAggregator) Start(ctx context.Context) { defer a.readersMutex.Unlock() combinedUrls := a.config.Urls combinedUrls = append(combinedUrls, urls...) 
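// Note: a.readers is rebuilt from scratch on every pass through this updater,
// from the union of the statically configured a.config.Urls and the urls
// argument; each URL gets a fresh RestfulDasClient below.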
- combinedReaders := make(map[arbstate.DataAvailabilityReader]bool) + combinedReaders := make(map[daprovider.DASReader]bool) for _, url := range combinedUrls { reader, err := NewRestfulDasClientFromURL(url) if err != nil { @@ -286,7 +286,7 @@ func (a *SimpleDASReaderAggregator) Start(ctx context.Context) { } combinedReaders[reader] = true } - a.readers = make([]arbstate.DataAvailabilityReader, 0, len(combinedUrls)) + a.readers = make([]daprovider.DASReader, 0, len(combinedUrls)) // Update reader and add newly added stats for reader := range combinedReaders { a.readers = append(a.readers, reader) @@ -350,7 +350,7 @@ func (a *SimpleDASReaderAggregator) HealthCheck(ctx context.Context) error { return nil } -func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { a.readersMutex.RLock() defer a.readersMutex.RUnlock() if len(a.readers) == 0 { @@ -368,7 +368,7 @@ func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (arbst return -1, err } if ep != expectedExpirationPolicy { - return arbstate.MixedTimeout, nil + return daprovider.MixedTimeout, nil } } return expectedExpirationPolicy, nil diff --git a/das/storage_service.go b/das/storage_service.go index 881d6fc8b1..806e80dba5 100644 --- a/das/storage_service.go +++ b/das/storage_service.go @@ -11,13 +11,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) var ErrNotFound = errors.New("not found") type StorageService interface { - arbstate.DataAvailabilityReader + daprovider.DASReader Put(ctx context.Context, data []byte, expirationTime uint64) error Sync(ctx context.Context) error Closer diff --git a/das/store_signing.go b/das/store_signing.go index 8039774b65..8ebc1a9805 100644 --- a/das/store_signing.go +++ b/das/store_signing.go @@ -12,7 +12,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/signature" @@ -56,7 +56,7 @@ func NewStoreSigningDAS(inner DataAvailabilityServiceWriter, signer signature.Da return &StoreSigningDAS{inner, signer, addr}, nil } -func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.StoreSigningDAS.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) mySig, err := applyDasSigner(s.signer, message, timeout) if err != nil { diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index c79cd80400..411e7a1977 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -20,7 +20,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" 
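// The preimage-recording callback now carries an arbutil.PreimageType (see the
// dastree.RecordHash change above). daprovider.RecordPreimagesTo, used in
// processBatchDelivered below, is presumably the adapter from a preimage map to
// that callback; a sketch of the assumed behavior:
//
//	preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte)
//	recorder := daprovider.RecordPreimagesTo(preimages)
//	// recorder(hash, value, ty) then stores value under preimages[ty][hash],
//	// creating the inner map on first use (assumption inferred from call sites).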
"github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/arbmath" @@ -94,7 +94,7 @@ type l1SyncService struct { config SyncToStorageConfig syncTo StorageService - dataSource arbstate.DataAvailabilityReader + dataSource daprovider.DASReader l1Reader *headerreader.HeaderReader inboxContract *bridgegen.SequencerInbox @@ -161,7 +161,7 @@ func writeSyncState(syncDir string, blockNr uint64) error { return os.Rename(f.Name(), path) } -func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSource arbstate.DataAvailabilityReader, l1Reader *headerreader.HeaderReader, inboxAddr common.Address) (*l1SyncService, error) { +func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSource daprovider.DASReader, l1Reader *headerreader.HeaderReader, inboxAddr common.Address) (*l1SyncService, error) { l1Client := l1Reader.Client() inboxContract, err := bridgegen.NewSequencerInbox(inboxAddr, l1Client) if err != nil { @@ -213,9 +213,14 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere data = append(header, data...) preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - if _, err = arbstate.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimages, arbstate.KeysetValidate); err != nil { - log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) - return err + preimageRecorder := daprovider.RecordPreimagesTo(preimages) + if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimageRecorder, true); err != nil { + if errors.Is(err, daprovider.ErrSeqMsgValidation) { + log.Error(err.Error()) + } else { + log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) + return err + } } for _, preimages := range preimages { for hash, contents := range preimages { @@ -291,7 +296,7 @@ func FindDASDataFromLog( log.Warn("BatchDelivered - no data found", "data", data) return nil, nil } - if !arbstate.IsDASMessageHeaderByte(data[0]) { + if !daprovider.IsDASMessageHeaderByte(data[0]) { log.Warn("BatchDelivered - data not DAS") return nil, nil } @@ -417,7 +422,7 @@ type SyncingFallbackStorageService struct { func NewSyncingFallbackStorageService(ctx context.Context, primary StorageService, - backup arbstate.DataAvailabilityReader, + backup daprovider.DASReader, backupHealthChecker DataAvailabilityServiceHealthChecker, l1Reader *headerreader.HeaderReader, inboxAddr common.Address, diff --git a/das/util.go b/das/util.go index d98a2687fe..de266c433f 100644 --- a/das/util.go +++ b/das/util.go @@ -7,11 +7,11 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" ) -func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader, more ...interface{}) { +func logPut(store string, data []byte, timeout uint64, reader daprovider.DASReader, more ...interface{}) { if len(more) == 0 { log.Trace( store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), diff --git a/go-ethereum b/go-ethereum index 72f81daa8c..da519ddc4f 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 72f81daa8c59f044246b6e1f3eca08187edd7417 +Subproject commit da519ddc4fd5113a46da734e41b37369a1dce098 diff --git a/relay/relay.go b/relay/relay.go index 
8e29971384..89bb899f29 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -10,8 +10,6 @@ import ( flag "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -120,7 +118,7 @@ func (r *Relay) StopAndWait() { type Config struct { Conf genericconf.ConfConfig `koanf:"conf"` Chain L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level"` + LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -133,7 +131,7 @@ type Config struct { var ConfigDefault = Config{ Conf: genericconf.ConfConfigDefault, Chain: L2ConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -146,7 +144,7 @@ var ConfigDefault = Config{ func ConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) L2ConfigAddOptions("chain", f) - f.Int("log-level", ConfigDefault.LogLevel, "log level") + f.String("log-level", ConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", ConfigDefault.LogType, "log type") f.Bool("metrics", ConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 56389ae80e..deaf4dc2dc 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -10,7 +10,6 @@ import ( "math/big" "time" - "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" @@ -50,7 +49,6 @@ type L1Validator struct { wallet ValidatorWalletInterface callOpts bind.CallOpts - das arbstate.DataAvailabilityReader inboxTracker InboxTrackerInterface txStreamer TransactionStreamerInterface blockValidator *BlockValidator @@ -62,7 +60,6 @@ func NewL1Validator( wallet ValidatorWalletInterface, validatorUtilsAddress common.Address, callOpts bind.CallOpts, - das arbstate.DataAvailabilityReader, inboxTracker InboxTrackerInterface, txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, @@ -90,7 +87,6 @@ func NewL1Validator( builder: builder, wallet: wallet, callOpts: callOpts, - das: das, inboxTracker: inboxTracker, txStreamer: txStreamer, blockValidator: blockValidator, diff --git a/staker/staker.go b/staker/staker.go index 2a95e9c9f7..da6413e122 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -291,7 +291,7 @@ func NewStaker( } client := l1Reader.Client() val, err := NewL1Validator(client, wallet, validatorUtilsAddress, callOpts, - statelessBlockValidator.daService, statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) + statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) if err != nil { return nil, err } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 19c61dae3d..49ce8f492c 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -9,6 +9,8 @@ import ( "fmt" "testing" + "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/ethdb" @@ -16,7 +18,6 @@ import ( 
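// Reconstruction of the new daprovider.Reader interface, inferred from the
// ValidationEntryRecord rewrite below and from cmd/replay above (method and
// parameter names are guesses; the authoritative definition lives in
// arbstate/daprovider):
//
//	type Reader interface {
//		// Reports whether this reader handles batches whose byte at
//		// offset 40 (the first payload byte) equals headerByte.
//		IsValidHeaderByte(headerByte byte) bool
//
//		// Recovers the batch payload, recording preimages through
//		// preimageRecorder; the trailing bool (true at every call site in
//		// this PR) appears to toggle sequencer-message/keyset validation.
//		RecoverPayloadFromBatch(ctx context.Context, batchNum uint64,
//			batchBlockHash common.Hash, sequencerMsg []byte,
//			preimageRecorder func(common.Hash, []byte, arbutil.PreimageType),
//			validateSeqMsg bool) ([]byte, error)
//	}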
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/rpcclient" @@ -38,8 +39,7 @@ type StatelessBlockValidator struct { inboxTracker InboxTrackerInterface streamer TransactionStreamerInterface db ethdb.Database - daService arbstate.DataAvailabilityReader - blobReader arbstate.BlobReader + dapReaders []daprovider.Reader } type BlockValidatorRegistrer interface { @@ -192,8 +192,7 @@ func NewStatelessBlockValidator( streamer TransactionStreamerInterface, recorder execution.ExecutionRecorder, arbdb ethdb.Database, - das arbstate.DataAvailabilityReader, - blobReader arbstate.BlobReader, + dapReaders []daprovider.Reader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -226,8 +225,7 @@ func NewStatelessBlockValidator( inboxTracker: inbox, streamer: streamer, db: arbdb, - daService: das, - blobReader: blobReader, + dapReaders: dapReaders, execSpawners: executionSpawners, }, nil } @@ -267,39 +265,27 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } - if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { - payload := batch.Data[41:] - if len(payload)%len(common.Hash{}) != 0 { - return fmt.Errorf("blob batch data is not a list of hashes as expected") - } - versionedHashes := make([]common.Hash, len(payload)/len(common.Hash{})) - for i := 0; i*32 < len(payload); i += 1 { - copy(versionedHashes[i][:], payload[i*32:(i+1)*32]) - } - blobs, err := v.blobReader.GetBlobs(ctx, batch.BlockHash, versionedHashes) - if err != nil { - return fmt.Errorf("failed to get blobs: %w", err) - } - if e.Preimages[arbutil.EthVersionedHashPreimageType] == nil { - e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) - } - for i, blob := range blobs { - // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable - // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview - b := blob - e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = b[:] - } - } - if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { - if v.daService == nil { - log.Warn("No DAS configured, but sequencer message found with DAS header") - } else { - _, err := arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, - ) + foundDA := false + for _, dapReader := range v.dapReaders { + if dapReader != nil && dapReader.IsValidHeaderByte(batch.Data[40]) { + preimageRecorder := daprovider.RecordPreimagesTo(e.Preimages) + _, err := dapReader.RecoverPayloadFromBatch(ctx, batch.Number, batch.BlockHash, batch.Data, preimageRecorder, true) if err != nil { - return err + // Matches the way keyset validation was done inside DAS readers i.e logging the error + // But other daproviders might just want to return the error + if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(batch.Data[40]) { + log.Error(err.Error()) + } else { + return err + } } + foundDA = true + break + } + } + if !foundDA { + if daprovider.IsDASMessageHeaderByte(batch.Data[40]) { + log.Error("No DAS Reader configured, but sequencer message found with DAS header") } } } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 
diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go
index 0fc127d0e3..6bc22b23da 100644
--- a/system_tests/batch_poster_test.go
+++ b/system_tests/batch_poster_test.go
@@ -171,7 +171,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) {
 			Config:        func() *arbnode.BatchPosterConfig { return &batchPosterConfig },
 			DeployInfo:    builder.L2.ConsensusNode.DeployInfo,
 			TransactOpts:  &seqTxOpts,
-			DAWriter:      nil,
+			DAPWriter:     nil,
 			ParentChainID: parentChainID,
 		},
 	)
diff --git a/system_tests/common_test.go b/system_tests/common_test.go
index a9f655ff77..1b2b7ca6d6 100644
--- a/system_tests/common_test.go
+++ b/system_tests/common_test.go
@@ -20,7 +20,7 @@ import (
 	"github.com/go-redis/redis/v8"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
 	"github.com/offchainlabs/nitro/arbos/util"
-	"github.com/offchainlabs/nitro/arbstate"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/blsSignatures"
 	"github.com/offchainlabs/nitro/cmd/chaininfo"
 	"github.com/offchainlabs/nitro/cmd/genericconf"
@@ -1035,7 +1035,7 @@ func authorizeDASKeyset(
 	if dasSignerKey == nil {
 		return
 	}
-	keyset := &arbstate.DataAvailabilityKeyset{
+	keyset := &daprovider.DataAvailabilityKeyset{
 		AssumedHonest: 1,
 		PubKeys:       []blsSignatures.PublicKey{*dasSignerKey},
 	}
diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go
index 197ea1a59f..e4c49c3227 100644
--- a/system_tests/full_challenge_impl_test.go
+++ b/system_tests/full_challenge_impl_test.go
@@ -390,7 +390,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 
 	confirmLatestBlock(ctx, t, l1Info, l1Backend)
 
-	asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
+	asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
 	if err != nil {
 		Fatal(t, err)
 	}
@@ -407,7 +407,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 	if err != nil {
 		Fatal(t, err)
 	}
-	challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
+	challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2.ArbDB, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
 	if err != nil {
 		Fatal(t, err)
 	}
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index d5bbeaa079..2d188295ec 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -208,7 +208,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		execNodeA,
 		l2nodeA.ArbDB,
 		nil,
-		nil,
 		StaticFetcherFrom(t, &blockValidatorConfig),
 		valStack,
 	)
@@ -261,7 +260,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		execNodeB,
 		l2nodeB.ArbDB,
 		nil,
-		nil,
 		StaticFetcherFrom(t, &blockValidatorConfig),
 		valStack,
 	)
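The test call sites above now pass a single dapReaders slice (nil in these tests) where separate das and blobReader arguments used to go. A non-test caller would assemble the slice along these lines; the DASReader/BlobReader argument types and the NewReaderFor* constructors are assumptions about the new daprovider package, named here for illustration only:

```go
// Hypothetical wiring for the new dapReaders parameter.
package wiring

import "github.com/offchainlabs/nitro/arbstate/daprovider"

// buildDapReaders collects whichever readers the node has configured into
// the single slice that NewStatelessBlockValidator now accepts.
func buildDapReaders(dasReader daprovider.DASReader, blobReader daprovider.BlobReader) []daprovider.Reader {
	var dapReaders []daprovider.Reader
	if dasReader != nil {
		dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader))
	}
	if blobReader != nil {
		dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader))
	}
	return dapReaders
}
```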
"github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/statetransfer" ) @@ -41,7 +42,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, daprovider.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -121,7 +122,7 @@ func (c noopChainContext) GetHeader(common.Hash, uint64) *types.Header { func FuzzStateTransition(f *testing.F) { f.Fuzz(func(t *testing.T, compressSeqMsg bool, seqMsg []byte, delayedMsg []byte) { - if len(seqMsg) > 0 && arbstate.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { + if len(seqMsg) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(seqMsg[0]) { return } chainDb := rawdb.NewMemoryDatabase() @@ -176,7 +177,7 @@ func FuzzStateTransition(f *testing.F) { binary.BigEndian.PutUint64(seqBatch[24:32], ^uint64(0)) binary.BigEndian.PutUint64(seqBatch[32:40], uint64(len(delayedMessages))) if compressSeqMsg { - seqBatch = append(seqBatch, arbstate.BrotliMessageHeaderByte) + seqBatch = append(seqBatch, daprovider.BrotliMessageHeaderByte) seqMsgCompressed, err := arbcompress.CompressLevel(seqMsg, 0) if err != nil { panic(fmt.Sprintf("failed to compress sequencer message: %v", err))