From 3d2d427318fa744d250765fd867794e6a3b0a457 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 23 Feb 2024 16:31:54 -0600 Subject: [PATCH 01/40] Unified writer interface for Data Availability providers --- arbnode/batch_poster.go | 30 +-- arbnode/inbox_tracker.go | 15 +- arbnode/node.go | 14 +- arbnode/sequencer_inbox.go | 4 +- arbstate/daprovider/reader.go | 96 +++++++ .../{das_reader.go => daprovider/util.go} | 162 +++++++++++- arbstate/daprovider/writer.go | 48 ++++ arbstate/inbox.go | 234 +----------------- arbstate/inbox_fuzz_test.go | 3 +- .../data_availability_check.go | 8 +- cmd/datool/datool.go | 6 +- cmd/nitro/nitro.go | 4 +- cmd/replay/main.go | 17 +- das/aggregator.go | 14 +- das/aggregator_test.go | 4 +- das/bigcache_storage_service.go | 4 +- das/chain_fetch_das.go | 20 +- das/das.go | 27 +- das/dasRpcClient.go | 10 +- das/db_storage_service.go | 8 +- das/extra_signature_checker_test.go | 8 +- das/fallback_storage_service.go | 6 +- das/ipfs_storage_service.go | 6 +- das/local_file_storage_service.go | 6 +- das/memory_backed_storage_service.go | 6 +- das/panic_wrapper.go | 4 +- das/read_limited.go | 16 +- das/reader_aggregator_strategies.go | 28 +-- das/reader_aggregator_strategies_test.go | 12 +- das/redis_storage_service.go | 4 +- das/redundant_storage_service.go | 20 +- das/restful_client.go | 10 +- das/restful_server.go | 8 +- das/rpc_aggregator.go | 4 +- das/s3_storage_service.go | 8 +- das/sign_after_store_das_writer.go | 8 +- das/simple_das_reader_aggregator.go | 22 +- das/storage_service.go | 4 +- das/store_signing.go | 4 +- das/syncing_fallback_storage.go | 12 +- das/util.go | 4 +- staker/l1_validator.go | 6 +- staker/stateless_block_validator.go | 18 +- system_tests/batch_poster_test.go | 2 +- system_tests/common_test.go | 4 +- system_tests/state_fuzz_test.go | 5 +- 46 files changed, 512 insertions(+), 451 deletions(-) create mode 100644 arbstate/daprovider/reader.go rename arbstate/{das_reader.go => daprovider/util.go} (66%) create mode 100644 arbstate/daprovider/writer.go diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index e09775ea44..aa91fc3f36 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -37,10 +37,10 @@ import ( "github.com/offchainlabs/nitro/arbnode/redislock" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" - "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" @@ -87,7 +87,7 @@ type BatchPoster struct { bridgeAddr common.Address gasRefunderAddr common.Address building *buildingBatch - daWriter das.DataAvailabilityServiceWriter + dapWriter daprovider.Writer dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] @@ -117,7 +117,7 @@ const ( type BatchPosterConfig struct { Enable bool `koanf:"enable"` - DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` + DisableDapFallbackStoreDataOnChain bool `koanf:"disable-dap-fallback-store-data-on-chain" reload:"hot"` // Max batch size. MaxSize int `koanf:"max-size" reload:"hot"` // Maximum 4844 blob enabled batch size. 
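The flag rename above tracks the core change in this patch: the batch poster no longer talks to the DAS aggregator directly, but to the new daprovider.Writer interface (added in arbstate/daprovider/writer.go further down), which now owns the decision to fall back to posting data on chain. A minimal sketch of what an out-of-tree provider could implement against that interface follows; exampleClient and its Post method are hypothetical stand-ins, and only the Store signature comes from this patch:

    package exampleprovider

    import (
    	"context"

    	"github.com/offchainlabs/nitro/arbstate/daprovider"
    )

    // exampleClient is a hypothetical client for an external DA service.
    type exampleClient interface {
    	// Post stores data until expiry and returns a self-describing certificate.
    	Post(ctx context.Context, data []byte, expiry uint64) ([]byte, error)
    }

    type exampleWriter struct {
    	client exampleClient
    }

    // Store implements daprovider.Writer: post the batch to the provider and
    // return the sequencerMsg bytes that a matching Reader can later resolve.
    func (w *exampleWriter) Store(
    	ctx context.Context,
    	message []byte,
    	timeout uint64,
    	sig []byte,
    	disableFallbackStoreDataOnChain bool,
    ) ([]byte, error) {
    	cert, err := w.client.Post(ctx, message, timeout)
    	if err != nil {
    		if disableFallbackStoreDataOnChain {
    			return nil, err // fallback disabled: surface the store failure
    		}
    		// Mirror writerForDAS below: hand the raw batch back so it is posted on chain.
    		return message, nil
    	}
    	return cert, nil
    }

    var _ daprovider.Writer = (*exampleWriter)(nil) // compile-time interface check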
@@ -176,7 +176,7 @@ type BatchPosterConfigFetcher func() *BatchPosterConfig func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") - f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") + f.Bool(prefix+".disable-dap-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDapFallbackStoreDataOnChain, "If unable to batch to DA provider, disable fallback storing data on chain") f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size") f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") @@ -200,7 +200,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, - DisableDasFallbackStoreDataOnChain: false, + DisableDapFallbackStoreDataOnChain: false, // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go MaxSize: 100000, // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? @@ -262,7 +262,7 @@ type BatchPosterOpts struct { Config BatchPosterConfigFetcher DeployInfo *chaininfo.RollupAddresses TransactOpts *bind.TransactOpts - DAWriter das.DataAvailabilityServiceWriter + DAPWriter daprovider.Writer ParentChainID *big.Int } @@ -308,7 +308,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e seqInboxAddr: opts.DeployInfo.SequencerInbox, gasRefunderAddr: opts.Config().gasRefunder, bridgeAddr: opts.DeployInfo.Bridge, - daWriter: opts.DAWriter, + dapWriter: opts.DAPWriter, redisLock: redisLock, } b.messagesPerBatch, err = arbmath.NewMovingAverage[uint64](20) @@ -787,7 +787,7 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) { } compressedBytes := s.compressedBuffer.Bytes() fullMsg := make([]byte, 1, len(compressedBytes)+1) - fullMsg[0] = arbstate.BrotliMessageHeaderByte + fullMsg[0] = daprovider.BrotliMessageHeaderByte fullMsg = append(fullMsg, compressedBytes...) 
return fullMsg, nil } @@ -1131,7 +1131,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, nil } - if b.daWriter != nil { + if b.dapWriter != nil { if !b.redisLock.AttemptLock(ctx) { return false, errAttemptLockFailed } @@ -1143,17 +1143,9 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if nonce != gotNonce || !bytes.Equal(batchPositionBytes, gotMeta) { return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) } - - cert, err := b.daWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}) // b.daWriter will append signature if enabled - if errors.Is(err, das.BatchToDasFailed) { - if config.DisableDasFallbackStoreDataOnChain { - return false, errors.New("unable to batch to DAS and fallback storing data on chain is disabled") - } - log.Warn("Falling back to storing data on chain", "err", err) - } else if err != nil { + sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), []byte{}, config.DisableDapFallbackStoreDataOnChain) + if err != nil { return false, err - } else { - sequencerMsg = das.Serialize(cert) } } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index f98f93a3eb..a20807b82f 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -20,6 +20,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" m "github.com/offchainlabs/nitro/broadcaster/message" @@ -37,14 +38,14 @@ type InboxTracker struct { txStreamer *TransactionStreamer mutex sync.Mutex validator *staker.BlockValidator - das arbstate.DataAvailabilityReader - blobReader arbstate.BlobReader + das daprovider.DASReader + blobReader daprovider.BlobReader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das daprovider.DASReader, blobReader daprovider.BlobReader) (*InboxTracker, error) { // We support a nil txStreamer for the pruning code if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { return nil, errors.New("data availability service required but unconfigured") @@ -606,14 +607,14 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - var daProviders []arbstate.DataAvailabilityProvider + var daProviders []daprovider.Reader if t.das != nil { - daProviders = append(daProviders, arbstate.NewDAProviderDAS(t.das)) + daProviders = append(daProviders, daprovider.NewReaderForDAS(t.das)) } if t.blobReader != nil { - daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(t.blobReader)) + daProviders = append(daProviders, daprovider.NewReaderForBlobReader(t.blobReader)) } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, arbstate.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, daprovider.KeysetValidate) batchMessageCounts := 
make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index 9f66710623..11e80b8225 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -26,7 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -251,7 +251,7 @@ type Node struct { L1Reader *headerreader.HeaderReader TxStreamer *TransactionStreamer DeployInfo *chaininfo.RollupAddresses - BlobReader arbstate.BlobReader + BlobReader daprovider.BlobReader InboxReader *InboxReader InboxTracker *InboxTracker DelayedSequencer *DelayedSequencer @@ -370,7 +370,7 @@ func createNodeImpl( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, - blobReader arbstate.BlobReader, + blobReader daprovider.BlobReader, ) (*Node, error) { config := configFetcher.Get() @@ -661,6 +661,10 @@ func createNodeImpl( if txOptsBatchPoster == nil && config.BatchPoster.DataPoster.ExternalSigner.URL == "" { return nil, errors.New("batchposter, but no TxOpts") } + var dapWriter daprovider.Writer + if daWriter != nil { + dapWriter = daprovider.NewWriterForDAS(daWriter) + } batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), L1Reader: l1Reader, @@ -671,7 +675,7 @@ func createNodeImpl( Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, DeployInfo: deployInfo, TransactOpts: txOptsBatchPoster, - DAWriter: daWriter, + DAPWriter: dapWriter, ParentChainID: parentChainID, }) if err != nil { @@ -732,7 +736,7 @@ func CreateNode( dataSigner signature.DataSignerFunc, fatalErrChan chan error, parentChainID *big.Int, - blobReader arbstate.BlobReader, + blobReader daprovider.BlobReader, ) (*Node, error) { currentNode, err := createNodeImpl(ctx, stack, exec, arbDb, configFetcher, l2Config, l1client, deployInfo, txOptsValidator, txOptsBatchPoster, dataSigner, fatalErrChan, parentChainID, blobReader) if err != nil { diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index b743bf0ef9..d86a640928 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -15,7 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -159,7 +159,7 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut if len(tx.BlobHashes()) == 0 { return nil, fmt.Errorf("blob batch transaction %v has no blobs", tx.Hash()) } - data := []byte{arbstate.BlobHashesHeaderFlag} + data := []byte{daprovider.BlobHashesHeaderFlag} for _, h := range tx.BlobHashes() { data = append(data, h[:]...) } diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go new file mode 100644 index 0000000000..f4be3aeb86 --- /dev/null +++ b/arbstate/daprovider/reader.go @@ -0,0 +1,96 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +package daprovider + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/blobs" +) + +type Reader interface { + // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider + IsValidHeaderByte(headerByte byte) bool + + // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information + RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, + ) ([]byte, error) +} + +// NewReaderForDAS is generally meant to be only used by nitro. +// DA Providers should implement methods in the Reader interface independently +func NewReaderForDAS(dasReader DASReader) *readerForDAS { + return &readerForDAS{dasReader: dasReader} +} + +type readerForDAS struct { + dasReader DASReader +} + +func (d *readerForDAS) IsValidHeaderByte(headerByte byte) bool { + return IsDASMessageHeaderByte(headerByte) +} + +func (d *readerForDAS) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, preimages, keysetValidationMode) +} + +// NewReaderForBlobReader is generally meant to be only used by nitro. +// DA Providers should implement methods in the Reader interface independently +func NewReaderForBlobReader(blobReader BlobReader) *readerForBlobReader { + return &readerForBlobReader{blobReader: blobReader} +} + +type readerForBlobReader struct { + blobReader BlobReader +} + +func (b *readerForBlobReader) IsValidHeaderByte(headerByte byte) bool { + return IsBlobHashesHeaderByte(headerByte) +} + +func (b *readerForBlobReader) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + blobHashes := sequencerMsg[41:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err := blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return nil, nil + } + return payload, nil +} diff --git a/arbstate/das_reader.go b/arbstate/daprovider/util.go similarity index 66% rename from arbstate/das_reader.go rename to arbstate/daprovider/util.go index f131a53608..2d887d2711 100644 --- a/arbstate/das_reader.go +++ b/arbstate/daprovider/util.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package arbstate +package daprovider import ( "bufio" @@ -13,18 +13,35 @@ import ( "io" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/util" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" ) -type DataAvailabilityReader interface { +type DASReader interface { GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) ExpirationPolicy(ctx context.Context) (ExpirationPolicy, error) } -var ErrHashMismatch = errors.New("result does not match expected hash") +type DASWriter interface { + // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). + Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*DataAvailabilityCertificate, error) + fmt.Stringer +} + +type BlobReader interface { + GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, + ) ([]kzg4844.Blob, error) + Initialize(ctx context.Context) error +} // DASMessageHeaderFlag indicates that this data is a certificate for the data availability service, // which will retrieve the full batch data. @@ -83,6 +100,124 @@ func IsKnownHeaderByte(b uint8) bool { return b&^KnownHeaderBits == 0 } +const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week +var ErrHashMismatch = errors.New("result does not match expected hash") +var ErrBatchToDasFailed = errors.New("unable to batch to DAS") + +type KeysetValidationMode uint8 + +const KeysetValidate KeysetValidationMode = 0 +const KeysetPanicIfInvalid KeysetValidationMode = 1 +const KeysetDontValidate KeysetValidationMode = 2 + +func RecoverPayloadFromDasBatch( + ctx context.Context, + batchNum uint64, + sequencerMsg []byte, + dasReader DASReader, + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + keysetValidationMode KeysetValidationMode, +) ([]byte, error) { + var keccakPreimages map[common.Hash][]byte + if preimages != nil { + if preimages[arbutil.Keccak256PreimageType] == nil { + preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + keccakPreimages = preimages[arbutil.Keccak256PreimageType] + } + cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) + if err != nil { + log.Error("Failed to deserialize DAS message", "err", err) + return nil, nil + } + version := cert.Version + recordPreimage := func(key common.Hash, value []byte) { + keccakPreimages[key] = value + } + + if version >= 2 { + log.Error("Your node software is probably out of date", "certificateVersion", version) + return nil, nil + } + + getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) { + newHash := hash + if version == 0 { + newHash = dastree.FlatHashToTreeHash(hash) + } + + preimage, err := dasReader.GetByHash(ctx, newHash) + if err != nil && hash != newHash { + log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err) + preimage, err = dasReader.GetByHash(ctx, hash) + } + if err != nil { + return nil, err + } + + switch { + case version == 0 && crypto.Keccak256Hash(preimage) != hash: + fallthrough + case version == 1 && dastree.Hash(preimage) != hash: + log.Error( + "preimage mismatch for hash", + "hash", hash, "err", ErrHashMismatch, "version", version, + ) + return nil, 
ErrHashMismatch
+		}
+		return preimage, nil
+	}
+
+	keysetPreimage, err := getByHash(ctx, cert.KeysetHash)
+	if err != nil {
+		log.Error("Couldn't get keyset", "err", err)
+		return nil, err
+	}
+	if keccakPreimages != nil {
+		dastree.RecordHash(recordPreimage, keysetPreimage)
+	}
+
+	keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), keysetValidationMode == KeysetDontValidate)
+	if err != nil {
+		logLevel := log.Error
+		if keysetValidationMode == KeysetPanicIfInvalid {
+			logLevel = log.Crit
+		}
+		logLevel("Couldn't deserialize keyset", "err", err, "keysetHash", cert.KeysetHash, "batchNum", batchNum)
+		return nil, nil
+	}
+	err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig)
+	if err != nil {
+		log.Error("Bad signature on DAS batch", "err", err)
+		return nil, nil
+	}
+
+	maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16])
+	if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert {
+		log.Error("Data availability cert expires too soon", "err", "")
+		return nil, nil
+	}
+
+	dataHash := cert.DataHash
+	payload, err := getByHash(ctx, dataHash)
+	if err != nil {
+		log.Error("Couldn't fetch DAS batch contents", "err", err)
+		return nil, err
+	}
+
+	if keccakPreimages != nil {
+		if version == 0 {
+			treeLeaf := dastree.FlatHashToTreeLeaf(dataHash)
+			keccakPreimages[dataHash] = payload
+			keccakPreimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf
+		} else {
+			dastree.RecordHash(recordPreimage, payload)
+		}
+	}
+
+	return payload, nil
+}
+
 type DataAvailabilityCertificate struct {
 	KeysetHash  [32]byte
 	DataHash    [32]byte
@@ -167,7 +302,7 @@ func (c *DataAvailabilityCertificate) SerializeSignableFields() []byte {
 
 func (c *DataAvailabilityCertificate) RecoverKeyset(
 	ctx context.Context,
-	da DataAvailabilityReader,
+	da DASReader,
 	assumeKeysetValid bool,
 ) (*DataAvailabilityKeyset, error) {
 	keysetBytes, err := da.GetByHash(ctx, c.KeysetHash)
@@ -316,3 +451,22 @@ func StringToExpirationPolicy(s string) (ExpirationPolicy, error) {
 		return -1, fmt.Errorf("invalid Expiration Policy: %s", s)
 	}
 }
+
+func Serialize(c *DataAvailabilityCertificate) []byte {
+
+	flags := DASMessageHeaderFlag
+	if c.Version != 0 {
+		flags |= TreeDASMessageHeaderFlag
+	}
+
+	buf := make([]byte, 0)
+	buf = append(buf, flags)
+	buf = append(buf, c.KeysetHash[:]...)
+	buf = append(buf, c.SerializeSignableFields()...)
+
+	var intData [8]byte
+	binary.BigEndian.PutUint64(intData[:], c.SignersMask)
+	buf = append(buf, intData[:]...)
+
+	return append(buf, blsSignatures.SignatureToBytes(c.Sig)...)
+}
diff --git a/arbstate/daprovider/writer.go b/arbstate/daprovider/writer.go
new file mode 100644
index 0000000000..44c53fb87d
--- /dev/null
+++ b/arbstate/daprovider/writer.go
@@ -0,0 +1,48 @@
+// Copyright 2021-2022, Offchain Labs, Inc.
+// For license information, see https://github.com/nitro/blob/master/LICENSE
+
+package daprovider
+
+import (
+	"context"
+	"errors"
+
+	"github.com/ethereum/go-ethereum/log"
+)
+
+type Writer interface {
+	// Store posts the batch data to the invoking DA provider
+	// and returns sequencerMsg, which is later used to retrieve the batch data
+	Store(
+		ctx context.Context,
+		message []byte,
+		timeout uint64,
+		sig []byte,
+		disableFallbackStoreDataOnChain bool,
+	) ([]byte, error)
+}
+
+// NewWriterForDAS is generally meant to be only used by nitro.
+// DA Providers should implement methods in the Writer interface independently
+func NewWriterForDAS(dasWriter DASWriter) *writerForDAS {
+	return &writerForDAS{dasWriter: dasWriter}
+}
+
+type writerForDAS struct {
+	dasWriter DASWriter
+}
+
+func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte, disableFallbackStoreDataOnChain bool) ([]byte, error) {
+	cert, err := d.dasWriter.Store(ctx, message, timeout, []byte{}) // d.dasWriter will append signature if enabled
+	if errors.Is(err, ErrBatchToDasFailed) {
+		if disableFallbackStoreDataOnChain {
+			return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
+		}
+		log.Warn("Falling back to storing data on chain", "err", err)
+		return message, nil
+	} else if err != nil {
+		return nil, err
+	} else {
+		return Serialize(cert), nil
+	}
+}
diff --git a/arbstate/inbox.go b/arbstate/inbox.go
index 49192f9d37..676ef44df0 100644
--- a/arbstate/inbox.go
+++ b/arbstate/inbox.go
@@ -13,8 +13,6 @@ import (
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/crypto/kzg4844"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 
@@ -22,9 +20,7 @@ import (
 	"github.com/offchainlabs/nitro/arbos/arbosState"
 	"github.com/offchainlabs/nitro/arbos/arbostypes"
 	"github.com/offchainlabs/nitro/arbos/l1pricing"
-	"github.com/offchainlabs/nitro/arbutil"
-	"github.com/offchainlabs/nitro/das/dastree"
-	"github.com/offchainlabs/nitro/util/blobs"
+	"github.com/offchainlabs/nitro/arbstate/daprovider"
 	"github.com/offchainlabs/nitro/zeroheavy"
 )
 
@@ -40,15 +36,6 @@ type InboxBackend interface {
 	ReadDelayedInbox(seqNum uint64) (*arbostypes.L1IncomingMessage, error)
 }
 
-type BlobReader interface {
-	GetBlobs(
-		ctx context.Context,
-		batchBlockHash common.Hash,
-		versionedHashes []common.Hash,
-	) ([]kzg4844.Blob, error)
-	Initialize(ctx context.Context) error
-}
-
 type sequencerMessage struct {
 	minTimestamp   uint64
 	maxTimestamp   uint64
@@ -61,9 +48,8 @@ type sequencerMessage struct {
 const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB
 const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64
 const MaxSegmentsPerSequencerMessage = 100 * 1024
-const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week
 
-func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) {
+func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*sequencerMessage, error) {
 	if len(data) < 40 {
 		return nil, errors.New("sequencer message missing L1 header")
 	}
@@ -81,7 +67,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
 	// If the parent chain sequencer inbox smart contract authenticated this batch,
 	// an unknown header byte must mean that this node is out of date,
 	// because the smart contract understands the header byte and this node doesn't.
- if len(payload) > 0 && IsL1AuthenticatedMessageHeaderByte(payload[0]) && !IsKnownHeaderByte(payload[0]) { + if len(payload) > 0 && daprovider.IsL1AuthenticatedMessageHeaderByte(payload[0]) && !daprovider.IsKnownHeaderByte(payload[0]) { return nil, fmt.Errorf("%w: batch has unsupported authenticated header byte 0x%02x", arbosState.ErrFatalNodeOutOfDate, payload[0]) } @@ -107,9 +93,9 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } if !foundDA { - if IsDASMessageHeaderByte(payload[0]) { + if daprovider.IsDASMessageHeaderByte(payload[0]) { log.Error("No DAS Reader configured, but sequencer message found with DAS header") - } else if IsBlobHashesHeaderByte(payload[0]) { + } else if daprovider.IsBlobHashesHeaderByte(payload[0]) { return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") } } @@ -119,7 +105,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // It's not safe to trust any part of the payload from this point onwards. // Stage 2: If enabled, decode the zero heavy payload (saves gas based on calldata charging). - if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { + if len(payload) > 0 && daprovider.IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) if err != nil { log.Warn("error reading from zeroheavy decoder", err.Error()) @@ -129,7 +115,7 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash } // Stage 3: Decompress the brotli payload and fill the parsedMsg.segments list. - if len(payload) > 0 && IsBrotliMessageHeaderByte(payload[0]) { + if len(payload) > 0 && daprovider.IsBrotliMessageHeaderByte(payload[0]) { decompressed, err := arbcompress.Decompress(payload[1:], MaxDecompressedLen) if err == nil { reader := bytes.NewReader(decompressed) @@ -165,220 +151,20 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash return parsedMsg, nil } -func RecoverPayloadFromDasBatch( - ctx context.Context, - batchNum uint64, - sequencerMsg []byte, - dasReader DataAvailabilityReader, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, -) ([]byte, error) { - var keccakPreimages map[common.Hash][]byte - if preimages != nil { - if preimages[arbutil.Keccak256PreimageType] == nil { - preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - keccakPreimages = preimages[arbutil.Keccak256PreimageType] - } - cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) - if err != nil { - log.Error("Failed to deserialize DAS message", "err", err) - return nil, nil - } - version := cert.Version - recordPreimage := func(key common.Hash, value []byte) { - keccakPreimages[key] = value - } - - if version >= 2 { - log.Error("Your node software is probably out of date", "certificateVersion", version) - return nil, nil - } - - getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) { - newHash := hash - if version == 0 { - newHash = dastree.FlatHashToTreeHash(hash) - } - - preimage, err := dasReader.GetByHash(ctx, newHash) - if err != nil && hash != newHash { - log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err) - preimage, err = dasReader.GetByHash(ctx, hash) - } - if err != nil { - return nil, err - } - - switch { - case version == 0 && crypto.Keccak256Hash(preimage) 
!= hash: - fallthrough - case version == 1 && dastree.Hash(preimage) != hash: - log.Error( - "preimage mismatch for hash", - "hash", hash, "err", ErrHashMismatch, "version", version, - ) - return nil, ErrHashMismatch - } - return preimage, nil - } - - keysetPreimage, err := getByHash(ctx, cert.KeysetHash) - if err != nil { - log.Error("Couldn't get keyset", "err", err) - return nil, err - } - if keccakPreimages != nil { - dastree.RecordHash(recordPreimage, keysetPreimage) - } - - keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), keysetValidationMode == KeysetDontValidate) - if err != nil { - logLevel := log.Error - if keysetValidationMode == KeysetPanicIfInvalid { - logLevel = log.Crit - } - logLevel("Couldn't deserialize keyset", "err", err, "keysetHash", cert.KeysetHash, "batchNum", batchNum) - return nil, nil - } - err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig) - if err != nil { - log.Error("Bad signature on DAS batch", "err", err) - return nil, nil - } - - maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16]) - if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert { - log.Error("Data availability cert expires too soon", "err", "") - return nil, nil - } - - dataHash := cert.DataHash - payload, err := getByHash(ctx, dataHash) - if err != nil { - log.Error("Couldn't fetch DAS batch contents", "err", err) - return nil, err - } - - if keccakPreimages != nil { - if version == 0 { - treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) - keccakPreimages[dataHash] = payload - keccakPreimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf - } else { - dastree.RecordHash(recordPreimage, payload) - } - } - - return payload, nil -} - -type DataAvailabilityProvider interface { - // IsValidHeaderByte returns true if the given headerByte has bits corresponding to the DA provider - IsValidHeaderByte(headerByte byte) bool - - // RecoverPayloadFromBatch fetches the underlying payload from the DA provider given the batch header information - RecoverPayloadFromBatch( - ctx context.Context, - batchNum uint64, - batchBlockHash common.Hash, - sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, - ) ([]byte, error) -} - -// NewDAProviderDAS is generally meant to be only used by nitro. -// DA Providers should implement methods in the DataAvailabilityProvider interface independently -func NewDAProviderDAS(das DataAvailabilityReader) *dAProviderForDAS { - return &dAProviderForDAS{ - das: das, - } -} - -type dAProviderForDAS struct { - das DataAvailabilityReader -} - -func (d *dAProviderForDAS) IsValidHeaderByte(headerByte byte) bool { - return IsDASMessageHeaderByte(headerByte) -} - -func (d *dAProviderForDAS) RecoverPayloadFromBatch( - ctx context.Context, - batchNum uint64, - batchBlockHash common.Hash, - sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, -) ([]byte, error) { - return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.das, preimages, keysetValidationMode) -} - -// NewDAProviderBlobReader is generally meant to be only used by nitro. 
-// DA Providers should implement methods in the DataAvailabilityProvider interface independently -func NewDAProviderBlobReader(blobReader BlobReader) *dAProviderForBlobReader { - return &dAProviderForBlobReader{ - blobReader: blobReader, - } -} - -type dAProviderForBlobReader struct { - blobReader BlobReader -} - -func (b *dAProviderForBlobReader) IsValidHeaderByte(headerByte byte) bool { - return IsBlobHashesHeaderByte(headerByte) -} - -func (b *dAProviderForBlobReader) RecoverPayloadFromBatch( - ctx context.Context, - batchNum uint64, - batchBlockHash common.Hash, - sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, -) ([]byte, error) { - blobHashes := sequencerMsg[41:] - if len(blobHashes)%len(common.Hash{}) != 0 { - return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") - } - versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) - for i := 0; i*32 < len(blobHashes); i += 1 { - copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) - } - kzgBlobs, err := b.blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) - if err != nil { - return nil, fmt.Errorf("failed to get blobs: %w", err) - } - payload, err := blobs.DecodeBlobs(kzgBlobs) - if err != nil { - log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) - return nil, nil - } - return payload, nil -} - -type KeysetValidationMode uint8 - -const KeysetValidate KeysetValidationMode = 0 -const KeysetPanicIfInvalid KeysetValidationMode = 1 -const KeysetDontValidate KeysetValidationMode = 2 - type inboxMultiplexer struct { backend InboxBackend delayedMessagesRead uint64 - daProviders []DataAvailabilityProvider + daProviders []daprovider.Reader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 cachedSegmentTimestamp uint64 cachedSegmentBlockNumber uint64 cachedSubMessageNumber uint64 - keysetValidationMode KeysetValidationMode + keysetValidationMode daprovider.KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []DataAvailabilityProvider, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index b34c02534b..5ede321810 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type multiplexerBackend struct { @@ -67,7 +68,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, daprovider.KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/cmd/dataavailability/data_availability_check.go b/cmd/dataavailability/data_availability_check.go index 72a311a7be..d80c0475bf 100644 --- a/cmd/dataavailability/data_availability_check.go +++ b/cmd/dataavailability/data_availability_check.go @@ -21,7 +21,7 @@ 
import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/das" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -65,7 +65,7 @@ type DataAvailabilityCheck struct { config *DataAvailabilityCheckConfig inboxAddr *common.Address inboxContract *bridgegen.SequencerInbox - urlToReaderMap map[string]arbstate.DataAvailabilityReader + urlToReaderMap map[string]daprovider.DASReader checkInterval time.Duration } @@ -86,7 +86,7 @@ func newDataAvailabilityCheck(ctx context.Context, dataAvailabilityCheckConfig * if err != nil { return nil, err } - urlToReaderMap := make(map[string]arbstate.DataAvailabilityReader, len(onlineUrls)) + urlToReaderMap := make(map[string]daprovider.DASReader, len(onlineUrls)) for _, url := range onlineUrls { reader, err := das.NewRestfulDasClientFromURL(url) if err != nil { @@ -238,7 +238,7 @@ func (d *DataAvailabilityCheck) checkDataAvailability(ctx context.Context, deliv if data == nil { return false, nil } - cert, err := arbstate.DeserializeDASCertFrom(bytes.NewReader(data)) + cert, err := daprovider.DeserializeDASCertFrom(bytes.NewReader(data)) if err != nil { return true, err } diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index d78d975fd5..3f64a990ca 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -22,7 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util" @@ -165,7 +165,7 @@ func startClientStore(args []string) error { } ctx := context.Background() - var cert *arbstate.DataAvailabilityCertificate + var cert *daprovider.DataAvailabilityCertificate if config.RandomMessageSize > 0 { message := make([]byte, config.RandomMessageSize) @@ -184,7 +184,7 @@ func startClientStore(args []string) error { return err } - serializedCert := das.Serialize(cert) + serializedCert := daprovider.Serialize(cert) fmt.Printf("Hex Encoded Cert: %s\n", hexutil.Encode(serializedCert)) fmt.Printf("Hex Encoded Data Hash: %s\n", hexutil.Encode(cert.DataHash[:])) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index c32d2e6c80..d40eedfdb1 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -42,7 +42,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/resourcemanager" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" blocksreexecutor "github.com/offchainlabs/nitro/blocks_reexecutor" "github.com/offchainlabs/nitro/cmd/chaininfo" @@ -331,7 +331,7 @@ func mainImpl() int { var rollupAddrs chaininfo.RollupAddresses var l1Client *ethclient.Client var l1Reader *headerreader.HeaderReader - var blobReader arbstate.BlobReader + var blobReader daprovider.BlobReader if nodeConfig.Node.ParentChainReader.Enable { confFetcher := func() *rpcclient.ClientConfig { return &liveNodeConfig.Get().ParentChain.Connection } rpcClient := rpcclient.NewRpcClient(confFetcher, nil) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 7ab59fc513..23273fcfb0 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -27,6 +27,7 @@ import ( 
"github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das/dastree" @@ -115,8 +116,8 @@ func (dasReader *PreimageDASReader) HealthCheck(ctx context.Context) error { return nil } -func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.DiscardImmediately, nil +func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.DiscardImmediately, nil } type BlobPreimageReader struct { @@ -201,20 +202,20 @@ func main() { if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - var dasReader arbstate.DataAvailabilityReader + var dasReader daprovider.DASReader if dasEnabled { dasReader = &PreimageDASReader{} } backend := WavmInbox{} - var keysetValidationMode = arbstate.KeysetPanicIfInvalid + var keysetValidationMode = daprovider.KeysetPanicIfInvalid if backend.GetPositionWithinMessage() > 0 { - keysetValidationMode = arbstate.KeysetDontValidate + keysetValidationMode = daprovider.KeysetDontValidate } - var daProviders []arbstate.DataAvailabilityProvider + var daProviders []daprovider.Reader if dasReader != nil { - daProviders = append(daProviders, arbstate.NewDAProviderDAS(dasReader)) + daProviders = append(daProviders, daprovider.NewReaderForDAS(dasReader)) } - daProviders = append(daProviders, arbstate.NewDAProviderBlobReader(&BlobPreimageReader{})) + daProviders = append(daProviders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) diff --git a/das/aggregator.go b/das/aggregator.go index 4b4571eb43..d3edd58437 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" @@ -37,8 +37,6 @@ var DefaultAggregatorConfig = AggregatorConfig{ Backends: "", } -var BatchToDasFailed = errors.New("unable to batch to DAS") - func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Bool(prefix+".enable", DefaultAggregatorConfig.Enable, "enable storage/retrieval of sequencer batch data from a list of RPC endpoints; this should only be used by the batch poster and not in combination with other DAS storage types") f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") @@ -164,7 +162,7 @@ type storeResponse struct { // constructed, calls to Store(...) will try to verify the passed-in data's signature // is from the batch poster. If the contract details are not provided, then the // signature is not checked, which is useful for testing. 
-func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.Aggregator.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig)) if a.addrVerifier != nil { actualSigner, err := DasRecoverSigner(message, timeout, sig) @@ -243,7 +241,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, }(ctx, d) } - var aggCert arbstate.DataAvailabilityCertificate + var aggCert daprovider.DataAvailabilityCertificate type certDetails struct { pubKeys []blsSignatures.PublicKey @@ -296,7 +294,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, } } else if storeFailures > a.maxAllowedServiceStoreFailures { cd := certDetails{} - cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, BatchToDasFailed) + cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed) certDetailsChan <- cd returned = true } @@ -323,10 +321,10 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64, verified, err := blsSignatures.VerifySignature(aggCert.Sig, aggCert.SerializeSignableFields(), aggPubKey) if err != nil { //nolint:errorlint - return nil, fmt.Errorf("%s. %w", err.Error(), BatchToDasFailed) + return nil, fmt.Errorf("%s. %w", err.Error(), daprovider.ErrBatchToDasFailed) } if !verified { - return nil, fmt.Errorf("failed aggregate signature check. %w", BatchToDasFailed) + return nil, fmt.Errorf("failed aggregate signature check. 
%w", daprovider.ErrBatchToDasFailed) } return &aggCert, nil } diff --git a/das/aggregator_test.go b/das/aggregator_test.go index 776af3975b..51b523ce72 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -15,10 +15,10 @@ import ( "testing" "time" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" ) func TestDAS_BasicAggregationLocal(t *testing.T) { @@ -122,7 +122,7 @@ type WrapStore struct { DataAvailabilityServiceWriter } -func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (w *WrapStore) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { switch w.injector.shouldFail() { case success: return w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go index f8421bed1d..f3586c8276 100644 --- a/das/bigcache_storage_service.go +++ b/das/bigcache_storage_service.go @@ -9,7 +9,7 @@ import ( "time" "github.com/allegro/bigcache" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" @@ -103,7 +103,7 @@ func (bcs *BigCacheStorageService) Close(ctx context.Context) error { return bcs.baseStorageService.Close(ctx) } -func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return bcs.baseStorageService.ExpirationPolicy(ctx) } diff --git a/das/chain_fetch_das.go b/das/chain_fetch_das.go index bc8ab5bc19..99311decaa 100644 --- a/das/chain_fetch_das.go +++ b/das/chain_fetch_das.go @@ -8,7 +8,7 @@ import ( "errors" "sync" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -38,13 +38,13 @@ func (c *syncedKeysetCache) put(key [32]byte, value []byte) { } type ChainFetchReader struct { - arbstate.DataAvailabilityReader + daprovider.DASReader seqInboxCaller *bridgegen.SequencerInboxCaller seqInboxFilterer *bridgegen.SequencerInboxFilterer keysetCache syncedKeysetCache } -func NewChainFetchReader(inner arbstate.DataAvailabilityReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { +func NewChainFetchReader(inner daprovider.DASReader, l1client arbutil.L1Interface, seqInboxAddr common.Address) (*ChainFetchReader, error) { seqInbox, err := bridgegen.NewSequencerInbox(seqInboxAddr, l1client) if err != nil { return nil, err @@ -53,18 +53,18 @@ func NewChainFetchReader(inner arbstate.DataAvailabilityReader, l1client arbutil return NewChainFetchReaderWithSeqInbox(inner, seqInbox) } -func NewChainFetchReaderWithSeqInbox(inner arbstate.DataAvailabilityReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { +func NewChainFetchReaderWithSeqInbox(inner daprovider.DASReader, seqInbox *bridgegen.SequencerInbox) (*ChainFetchReader, error) { return &ChainFetchReader{ - DataAvailabilityReader: inner, - seqInboxCaller: &seqInbox.SequencerInboxCaller, - seqInboxFilterer: &seqInbox.SequencerInboxFilterer, - keysetCache: 
syncedKeysetCache{cache: make(map[[32]byte][]byte)}, + DASReader: inner, + seqInboxCaller: &seqInbox.SequencerInboxCaller, + seqInboxFilterer: &seqInbox.SequencerInboxFilterer, + keysetCache: syncedKeysetCache{cache: make(map[[32]byte][]byte)}, }, nil } func (c *ChainFetchReader) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { log.Trace("das.ChainFetchReader.GetByHash", "hash", pretty.PrettyHash(hash)) - return chainFetchGetByHash(ctx, c.DataAvailabilityReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) + return chainFetchGetByHash(ctx, c.DASReader, &c.keysetCache, c.seqInboxCaller, c.seqInboxFilterer, hash) } func (c *ChainFetchReader) String() string { return "ChainFetchReader" @@ -72,7 +72,7 @@ func (c *ChainFetchReader) String() string { func chainFetchGetByHash( ctx context.Context, - daReader arbstate.DataAvailabilityReader, + daReader daprovider.DASReader, cache *syncedKeysetCache, seqInboxCaller *bridgegen.SequencerInboxCaller, seqInboxFilterer *bridgegen.SequencerInboxFilterer, diff --git a/das/das.go b/das/das.go index 910e511083..6ec9ff06ef 100644 --- a/das/das.go +++ b/das/das.go @@ -5,7 +5,6 @@ package das import ( "context" - "encoding/binary" "errors" "fmt" "math" @@ -16,18 +15,17 @@ import ( "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" - "github.com/offchainlabs/nitro/arbstate" - "github.com/offchainlabs/nitro/blsSignatures" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type DataAvailabilityServiceWriter interface { // Store requests that the message be stored until timeout (UTC time in unix epoch seconds). - Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) + Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) fmt.Stringer } type DataAvailabilityServiceReader interface { - arbstate.DataAvailabilityReader + daprovider.DASReader fmt.Stringer } @@ -138,25 +136,6 @@ func dataAvailabilityConfigAddOptions(prefix string, f *flag.FlagSet, r role) { f.String(prefix+".sequencer-inbox-address", DefaultDataAvailabilityConfig.SequencerInboxAddress, "parent chain address of SequencerInbox contract") } -func Serialize(c *arbstate.DataAvailabilityCertificate) []byte { - - flags := arbstate.DASMessageHeaderFlag - if c.Version != 0 { - flags |= arbstate.TreeDASMessageHeaderFlag - } - - buf := make([]byte, 0) - buf = append(buf, flags) - buf = append(buf, c.KeysetHash[:]...) - buf = append(buf, c.SerializeSignableFields()...) - - var intData [8]byte - binary.BigEndian.PutUint64(intData[:], c.SignersMask) - buf = append(buf, intData[:]...) - - return append(buf, blsSignatures.SignatureToBytes(c.Sig)...) 
-} - func GetL1Client(ctx context.Context, maxConnectionAttempts int, l1URL string) (*ethclient.Client, error) { if maxConnectionAttempts <= 0 { maxConnectionAttempts = math.MaxInt diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 54d8eba94c..5fca1e449f 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -13,7 +13,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/util/pretty" ) @@ -34,7 +34,7 @@ func NewDASRPCClient(target string) (*DASRPCClient, error) { }, nil } -func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64, reqSig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.DASRPCClient.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(reqSig), "this", *c) var ret StoreResult if err := c.clnt.CallContext(ctx, &ret, "das_store", hexutil.Bytes(message), hexutil.Uint64(timeout), hexutil.Bytes(reqSig)); err != nil { @@ -44,7 +44,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 if err != nil { return nil, err } - return &arbstate.DataAvailabilityCertificate{ + return &daprovider.DataAvailabilityCertificate{ DataHash: common.BytesToHash(ret.DataHash), Timeout: uint64(ret.Timeout), SignersMask: uint64(ret.SignersMask), @@ -62,11 +62,11 @@ func (c *DASRPCClient) HealthCheck(ctx context.Context) error { return c.clnt.CallContext(ctx, nil, "das_healthCheck") } -func (c *DASRPCClient) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *DASRPCClient) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { var res string err := c.clnt.CallContext(ctx, &res, "das_expirationPolicy") if err != nil { return -1, err } - return arbstate.StringToExpirationPolicy(res) + return daprovider.StringToExpirationPolicy(res) } diff --git a/das/db_storage_service.go b/das/db_storage_service.go index b9af530b9e..52a33b6f64 100644 --- a/das/db_storage_service.go +++ b/das/db_storage_service.go @@ -12,7 +12,7 @@ import ( badger "github.com/dgraph-io/badger/v3" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -135,11 +135,11 @@ func (dbs *DBStorageService) Close(ctx context.Context) error { return dbs.stopWaiter.StopAndWait() } -func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (dbs *DBStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { if dbs.discardAfterTimeout { - return arbstate.DiscardAfterDataTimeout, nil + return daprovider.DiscardAfterDataTimeout, nil } - return arbstate.KeepForever, nil + return daprovider.KeepForever, nil } func (dbs *DBStorageService) String() string { diff --git a/das/extra_signature_checker_test.go b/das/extra_signature_checker_test.go index 88a0969229..2fcfac167d 100644 --- a/das/extra_signature_checker_test.go +++ 
b/das/extra_signature_checker_test.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/signature" ) @@ -22,7 +22,7 @@ type StubSignatureCheckDAS struct { keyDir string } -func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { pubkeyEncoded, err := ioutil.ReadFile(s.keyDir + "/ecdsa.pub") if err != nil { return nil, err @@ -39,8 +39,8 @@ func (s *StubSignatureCheckDAS) Store(ctx context.Context, message []byte, timeo return nil, nil } -func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *StubSignatureCheckDAS) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *StubSignatureCheckDAS) GetByHash(ctx context.Context, hash common.Hash) ([]byte, error) { diff --git a/das/fallback_storage_service.go b/das/fallback_storage_service.go index a78b4104e8..49f961da60 100644 --- a/das/fallback_storage_service.go +++ b/das/fallback_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/util/pretty" @@ -18,7 +18,7 @@ import ( type FallbackStorageService struct { StorageService - backup arbstate.DataAvailabilityReader + backup daprovider.DASReader backupHealthChecker DataAvailabilityServiceHealthChecker backupRetentionSeconds uint64 ignoreRetentionWriteErrors bool @@ -32,7 +32,7 @@ type FallbackStorageService struct { // a successful GetByHash result from the backup is Put into the primary. 
func NewFallbackStorageService( primary StorageService, - backup arbstate.DataAvailabilityReader, + backup daprovider.DASReader, backupHealthChecker DataAvailabilityServiceHealthChecker, backupRetentionSeconds uint64, // how long to retain data that we copy in from the backup (MaxUint64 means forever) ignoreRetentionWriteErrors bool, // if true, don't return error if write of retention data to primary fails diff --git a/das/ipfs_storage_service.go b/das/ipfs_storage_service.go index 4f73242c22..fa15fc7971 100644 --- a/das/ipfs_storage_service.go +++ b/das/ipfs_storage_service.go @@ -22,7 +22,7 @@ import ( "github.com/ipfs/interface-go-ipfs-core/options" "github.com/ipfs/interface-go-ipfs-core/path" "github.com/multiformats/go-multihash" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -219,8 +219,8 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 panic("unreachable") } -func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *IpfsStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *IpfsStorageService) Sync(ctx context.Context) error { diff --git a/das/local_file_storage_service.go b/das/local_file_storage_service.go index 5fa5306e39..4ebb1d56d9 100644 --- a/das/local_file_storage_service.go +++ b/das/local_file_storage_service.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" flag "github.com/spf13/pflag" @@ -130,8 +130,8 @@ func (s *LocalFileStorageService) Close(ctx context.Context) error { return nil } -func (s *LocalFileStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (s *LocalFileStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (s *LocalFileStorageService) String() string { diff --git a/das/memory_backed_storage_service.go b/das/memory_backed_storage_service.go index 6484231479..91f7d9a2f5 100644 --- a/das/memory_backed_storage_service.go +++ b/das/memory_backed_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" ) @@ -79,8 +79,8 @@ func (m *MemoryBackedStorageService) Close(ctx context.Context) error { return nil } -func (m *MemoryBackedStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { - return arbstate.KeepForever, nil +func (m *MemoryBackedStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { + return daprovider.KeepForever, nil } func (m *MemoryBackedStorageService) String() string { diff --git a/das/panic_wrapper.go b/das/panic_wrapper.go index 7a15f6bec0..dbb61cba96 100644 --- a/das/panic_wrapper.go +++ b/das/panic_wrapper.go @@ -10,7 +10,7 @@ import ( 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type WriterPanicWrapper struct { @@ -26,7 +26,7 @@ func (w *WriterPanicWrapper) String() string { return fmt.Sprintf("WriterPanicWrapper{%v}", w.DataAvailabilityServiceWriter) } -func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (w *WriterPanicWrapper) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { cert, err := w.DataAvailabilityServiceWriter.Store(ctx, message, timeout, sig) if err != nil { panic(fmt.Sprintf("panic wrapper Store: %v", err)) diff --git a/das/read_limited.go b/das/read_limited.go index 74d6d5358d..5ef0335d5f 100644 --- a/das/read_limited.go +++ b/das/read_limited.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) // These classes are wrappers implementing das.StorageService and das.DataAvailabilityService. @@ -16,12 +16,12 @@ import ( // it is a programming error in the code setting up the node or daserver if a non-writeable object // is used in a writeable context. -func NewReadLimitedStorageService(reader arbstate.DataAvailabilityReader) *readLimitedStorageService { +func NewReadLimitedStorageService(reader daprovider.DASReader) *readLimitedStorageService { return &readLimitedStorageService{reader} } type readLimitedStorageService struct { - arbstate.DataAvailabilityReader + daprovider.DASReader } func (s *readLimitedStorageService) Put(ctx context.Context, data []byte, expiration uint64) error { @@ -37,22 +37,22 @@ func (s *readLimitedStorageService) Close(ctx context.Context) error { } func (s *readLimitedStorageService) String() string { - return fmt.Sprintf("readLimitedStorageService(%v)", s.DataAvailabilityReader) + return fmt.Sprintf("readLimitedStorageService(%v)", s.DASReader) } type readLimitedDataAvailabilityService struct { - arbstate.DataAvailabilityReader + daprovider.DASReader } -func NewReadLimitedDataAvailabilityService(da arbstate.DataAvailabilityReader) *readLimitedDataAvailabilityService { +func NewReadLimitedDataAvailabilityService(da daprovider.DASReader) *readLimitedDataAvailabilityService { return &readLimitedDataAvailabilityService{da} } -func (*readLimitedDataAvailabilityService) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (*readLimitedDataAvailabilityService) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { panic("Logic error: readLimitedDataAvailabilityService.Store shouldn't be called.") } func (s *readLimitedDataAvailabilityService) String() string { - return fmt.Sprintf("ReadLimitedDataAvailabilityService(%v)", s.DataAvailabilityReader) + return fmt.Sprintf("ReadLimitedDataAvailabilityService(%v)", s.DASReader) } diff --git a/das/reader_aggregator_strategies.go b/das/reader_aggregator_strategies.go index 855be5e318..d20760bd5b 100644 --- a/das/reader_aggregator_strategies.go +++ b/das/reader_aggregator_strategies.go @@ -10,30 +10,30 @@ import ( "sync" "sync/atomic" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) var ErrNoReadersResponded = errors.New("no DAS readers responded successfully") type 
aggregatorStrategy interface { newInstance() aggregatorStrategyInstance - update([]arbstate.DataAvailabilityReader, map[arbstate.DataAvailabilityReader]readerStats) + update([]daprovider.DASReader, map[daprovider.DASReader]readerStats) } type abstractAggregatorStrategy struct { sync.RWMutex - readers []arbstate.DataAvailabilityReader - stats map[arbstate.DataAvailabilityReader]readerStats + readers []daprovider.DASReader + stats map[daprovider.DASReader]readerStats } -func (s *abstractAggregatorStrategy) update(readers []arbstate.DataAvailabilityReader, stats map[arbstate.DataAvailabilityReader]readerStats) { +func (s *abstractAggregatorStrategy) update(readers []daprovider.DASReader, stats map[daprovider.DASReader]readerStats) { s.Lock() defer s.Unlock() - s.readers = make([]arbstate.DataAvailabilityReader, len(readers)) + s.readers = make([]daprovider.DASReader, len(readers)) copy(s.readers, readers) - s.stats = make(map[arbstate.DataAvailabilityReader]readerStats) + s.stats = make(map[daprovider.DASReader]readerStats) for k, v := range stats { s.stats[k] = v } @@ -51,11 +51,11 @@ type simpleExploreExploitStrategy struct { func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance { iterations := atomic.AddUint32(&s.iterations, 1) - readerSets := make([][]arbstate.DataAvailabilityReader, 0) + readerSets := make([][]daprovider.DASReader, 0) s.RLock() defer s.RUnlock() - readers := make([]arbstate.DataAvailabilityReader, len(s.readers)) + readers := make([]daprovider.DASReader, len(s.readers)) copy(readers, s.readers) if iterations%(s.exploreIterations+s.exploitIterations) < s.exploreIterations { @@ -70,7 +70,7 @@ func (s *simpleExploreExploitStrategy) newInstance() aggregatorStrategyInstance } for i, maxTake := 0, 1; i < len(readers); maxTake = maxTake * 2 { - readerSet := make([]arbstate.DataAvailabilityReader, 0, maxTake) + readerSet := make([]daprovider.DASReader, 0, maxTake) for taken := 0; taken < maxTake && i < len(readers); i, taken = i+1, taken+1 { readerSet = append(readerSet, readers[i]) } @@ -91,7 +91,7 @@ func (s *testingSequentialStrategy) newInstance() aggregatorStrategyInstance { si := basicStrategyInstance{} for _, reader := range s.readers { - si.readerSets = append(si.readerSets, []arbstate.DataAvailabilityReader{reader}) + si.readerSets = append(si.readerSets, []daprovider.DASReader{reader}) } return &si @@ -99,14 +99,14 @@ func (s *testingSequentialStrategy) newInstance() aggregatorStrategyInstance { // Instance of a strategy that returns readers in an order according to the strategy type aggregatorStrategyInstance interface { - nextReaders() []arbstate.DataAvailabilityReader + nextReaders() []daprovider.DASReader } type basicStrategyInstance struct { - readerSets [][]arbstate.DataAvailabilityReader + readerSets [][]daprovider.DASReader } -func (si *basicStrategyInstance) nextReaders() []arbstate.DataAvailabilityReader { +func (si *basicStrategyInstance) nextReaders() []daprovider.DASReader { if len(si.readerSets) == 0 { return nil } diff --git a/das/reader_aggregator_strategies_test.go b/das/reader_aggregator_strategies_test.go index 987bc08938..cdb85b25e9 100644 --- a/das/reader_aggregator_strategies_test.go +++ b/das/reader_aggregator_strategies_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) type dummyReader struct { @@ -26,13 +26,13 @@ func (*dummyReader) HealthCheck(context.Context) error { return 
errors.New("not implemented") } -func (*dummyReader) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (*dummyReader) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return -1, errors.New("not implemented") } func TestDAS_SimpleExploreExploit(t *testing.T) { - readers := []arbstate.DataAvailabilityReader{&dummyReader{0}, &dummyReader{1}, &dummyReader{2}, &dummyReader{3}, &dummyReader{4}, &dummyReader{5}} - stats := make(map[arbstate.DataAvailabilityReader]readerStats) + readers := []daprovider.DASReader{&dummyReader{0}, &dummyReader{1}, &dummyReader{2}, &dummyReader{3}, &dummyReader{4}, &dummyReader{5}} + stats := make(map[daprovider.DASReader]readerStats) stats[readers[0]] = []readerStat{ // weighted avg 10s {10 * time.Second, true}, } @@ -57,7 +57,7 @@ func TestDAS_SimpleExploreExploit(t *testing.T) { {8 * time.Second, true}, } - expectedOrdering := []arbstate.DataAvailabilityReader{readers[1], readers[2], readers[5], readers[4], readers[0], readers[3]} + expectedOrdering := []daprovider.DASReader{readers[1], readers[2], readers[5], readers[4], readers[0], readers[3]} expectedExploreIterations, expectedExploitIterations := uint32(5), uint32(5) strategy := simpleExploreExploitStrategy{ @@ -66,7 +66,7 @@ func TestDAS_SimpleExploreExploit(t *testing.T) { } strategy.update(readers, stats) - checkMatch := func(expected, was []arbstate.DataAvailabilityReader, doMatch bool) { + checkMatch := func(expected, was []daprovider.DASReader, doMatch bool) { if len(expected) != len(was) { Fail(t, fmt.Sprintf("Incorrect number of nextReaders %d, expected %d", len(was), len(expected))) } diff --git a/das/redis_storage_service.go b/das/redis_storage_service.go index 3449a8e78c..dbd85921ed 100644 --- a/das/redis_storage_service.go +++ b/das/redis_storage_service.go @@ -13,7 +13,7 @@ import ( "golang.org/x/crypto/sha3" "github.com/go-redis/redis/v8" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/redisutil" @@ -162,7 +162,7 @@ func (rs *RedisStorageService) Close(ctx context.Context) error { return rs.baseStorageService.Close(ctx) } -func (rs *RedisStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (rs *RedisStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { return rs.baseStorageService.ExpirationPolicy(ctx) } diff --git a/das/redundant_storage_service.go b/das/redundant_storage_service.go index 74d32bd819..3158d28076 100644 --- a/das/redundant_storage_service.go +++ b/das/redundant_storage_service.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" ) @@ -121,7 +121,7 @@ func (r *RedundantStorageService) Close(ctx context.Context) error { return anyError } -func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { // If at least one inner service has KeepForever, // then whole redundant service can serve after timeout. 
@@ -132,20 +132,20 @@ func (r *RedundantStorageService) ExpirationPolicy(ctx context.Context) (arbstat // If no inner service has KeepForever, DiscardAfterArchiveTimeout, // but at least one inner service has DiscardAfterDataTimeout, // then whole redundant service can serve till data timeout. - var res arbstate.ExpirationPolicy = -1 + var res daprovider.ExpirationPolicy = -1 for _, serv := range r.innerServices { expirationPolicy, err := serv.ExpirationPolicy(ctx) if err != nil { return -1, err } switch expirationPolicy { - case arbstate.KeepForever: - return arbstate.KeepForever, nil - case arbstate.DiscardAfterArchiveTimeout: - res = arbstate.DiscardAfterArchiveTimeout - case arbstate.DiscardAfterDataTimeout: - if res != arbstate.DiscardAfterArchiveTimeout { - res = arbstate.DiscardAfterDataTimeout + case daprovider.KeepForever: + return daprovider.KeepForever, nil + case daprovider.DiscardAfterArchiveTimeout: + res = daprovider.DiscardAfterArchiveTimeout + case daprovider.DiscardAfterDataTimeout: + if res != daprovider.DiscardAfterArchiveTimeout { + res = daprovider.DiscardAfterDataTimeout } } } diff --git a/das/restful_client.go b/das/restful_client.go index 7d757c6bb8..b65426e7cd 100644 --- a/das/restful_client.go +++ b/das/restful_client.go @@ -14,11 +14,11 @@ import ( "strings" "github.com/ethereum/go-ethereum/common" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" ) -// RestfulDasClient implements DataAvailabilityReader +// RestfulDasClient implements daprovider.DASReader type RestfulDasClient struct { url string } @@ -65,7 +65,7 @@ func (c *RestfulDasClient) GetByHash(ctx context.Context, hash common.Hash) ([]b return nil, err } if !dastree.ValidHash(hash, decodedBytes) { - return nil, arbstate.ErrHashMismatch + return nil, daprovider.ErrHashMismatch } return decodedBytes, nil @@ -82,7 +82,7 @@ func (c *RestfulDasClient) HealthCheck(ctx context.Context) error { return nil } -func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { res, err := http.Get(c.url + expirationPolicyRequestPath) if err != nil { return -1, err @@ -101,5 +101,5 @@ func (c *RestfulDasClient) ExpirationPolicy(ctx context.Context) (arbstate.Expir return -1, err } - return arbstate.StringToExpirationPolicy(response.ExpirationPolicy) + return daprovider.StringToExpirationPolicy(response.ExpirationPolicy) } diff --git a/das/restful_server.go b/das/restful_server.go index 5c5e82e820..b1607729e2 100644 --- a/das/restful_server.go +++ b/das/restful_server.go @@ -17,7 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/util/pretty" ) @@ -32,13 +32,13 @@ var ( type RestfulDasServer struct { server *http.Server - daReader arbstate.DataAvailabilityReader + daReader daprovider.DASReader daHealthChecker DataAvailabilityServiceHealthChecker httpServerExitedChan chan interface{} httpServerError error } -func NewRestfulDasServer(address string, port uint64, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader arbstate.DataAvailabilityReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, 
error) { +func NewRestfulDasServer(address string, port uint64, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader daprovider.DASReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", address, port)) if err != nil { return nil, err @@ -46,7 +46,7 @@ func NewRestfulDasServer(address string, port uint64, restServerTimeouts generic return NewRestfulDasServerOnListener(listener, restServerTimeouts, daReader, daHealthChecker) } -func NewRestfulDasServerOnListener(listener net.Listener, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader arbstate.DataAvailabilityReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { +func NewRestfulDasServerOnListener(listener net.Listener, restServerTimeouts genericconf.HTTPServerTimeoutConfig, daReader daprovider.DASReader, daHealthChecker DataAvailabilityServiceHealthChecker) (*RestfulDasServer, error) { ret := &RestfulDasServer{ daReader: daReader, diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 134c4229c8..490116a89a 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -12,7 +12,7 @@ import ( "math/bits" "net/url" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/metricsutil" @@ -102,7 +102,7 @@ func KeysetHashFromServices(services []ServiceDetails, assumedHonest uint64) ([3 return [32]byte{}, nil, errors.New("at least two signers share a mask") } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: uint64(assumedHonest), PubKeys: pubKeys, } diff --git a/das/s3_storage_service.go b/das/s3_storage_service.go index 1a3ae94114..b5150fb8ed 100644 --- a/das/s3_storage_service.go +++ b/das/s3_storage_service.go @@ -15,7 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -145,11 +145,11 @@ func (s3s *S3StorageService) Close(ctx context.Context) error { return nil } -func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (s3s *S3StorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { if s3s.discardAfterTimeout { - return arbstate.DiscardAfterDataTimeout, nil + return daprovider.DiscardAfterDataTimeout, nil } - return arbstate.KeepForever, nil + return daprovider.KeepForever, nil } func (s3s *S3StorageService) String() string { diff --git a/das/sign_after_store_das_writer.go b/das/sign_after_store_das_writer.go index 50c4ee9aee..36c51c022e 100644 --- a/das/sign_after_store_das_writer.go +++ b/das/sign_after_store_das_writer.go @@ -18,7 +18,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -123,7 +123,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( return nil, err } - keyset := 
&arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: 1, PubKeys: []blsSignatures.PublicKey{publicKey}, } @@ -180,7 +180,7 @@ func NewSignAfterStoreDASWriterWithSeqInboxCaller( func (d *SignAfterStoreDASWriter) Store( ctx context.Context, message []byte, timeout uint64, sig []byte, -) (c *arbstate.DataAvailabilityCertificate, err error) { +) (c *daprovider.DataAvailabilityCertificate, err error) { log.Trace("das.SignAfterStoreDASWriter.Store", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", d) var verified bool if d.extraBpVerifier != nil { @@ -201,7 +201,7 @@ func (d *SignAfterStoreDASWriter) Store( } } - c = &arbstate.DataAvailabilityCertificate{ + c = &daprovider.DataAvailabilityCertificate{ Timeout: timeout, DataHash: dastree.Hash(message), Version: 1, diff --git a/das/simple_das_reader_aggregator.go b/das/simple_das_reader_aggregator.go index eb82a33837..dc6147a7e4 100644 --- a/das/simple_das_reader_aggregator.go +++ b/das/simple_das_reader_aggregator.go @@ -14,7 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -80,7 +80,7 @@ func SimpleExploreExploitStrategyConfigAddOptions(prefix string, f *flag.FlagSet func NewRestfulClientAggregator(ctx context.Context, config *RestfulClientAggregatorConfig) (*SimpleDASReaderAggregator, error) { a := SimpleDASReaderAggregator{ config: config, - stats: make(map[arbstate.DataAvailabilityReader]readerStats), + stats: make(map[daprovider.DASReader]readerStats), } combinedUrls := make(map[string]bool) @@ -160,7 +160,7 @@ type readerStat struct { type readerStatMessage struct { readerStat - reader arbstate.DataAvailabilityReader + reader daprovider.DASReader } type SimpleDASReaderAggregator struct { @@ -170,8 +170,8 @@ type SimpleDASReaderAggregator struct { readersMutex sync.RWMutex // readers and stats are only to be updated by the stats goroutine - readers []arbstate.DataAvailabilityReader - stats map[arbstate.DataAvailabilityReader]readerStats + readers []daprovider.DASReader + stats map[daprovider.DASReader]readerStats strategy aggregatorStrategy @@ -199,7 +199,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.H waitChan := make(chan interface{}) for _, reader := range readers { wg.Add(1) - go func(reader arbstate.DataAvailabilityReader) { + go func(reader daprovider.DASReader) { defer wg.Done() data, err := a.tryGetByHash(subCtx, hash, reader) if err != nil && errors.Is(ctx.Err(), context.Canceled) { @@ -243,7 +243,7 @@ func (a *SimpleDASReaderAggregator) GetByHash(ctx context.Context, hash common.H } func (a *SimpleDASReaderAggregator) tryGetByHash( - ctx context.Context, hash common.Hash, reader arbstate.DataAvailabilityReader, + ctx context.Context, hash common.Hash, reader daprovider.DASReader, ) ([]byte, error) { stat := readerStatMessage{reader: reader} stat.success = false @@ -278,7 +278,7 @@ func (a *SimpleDASReaderAggregator) Start(ctx context.Context) { defer a.readersMutex.Unlock() combinedUrls := a.config.Urls combinedUrls = append(combinedUrls, urls...) 
- combinedReaders := make(map[arbstate.DataAvailabilityReader]bool) + combinedReaders := make(map[daprovider.DASReader]bool) for _, url := range combinedUrls { reader, err := NewRestfulDasClientFromURL(url) if err != nil { @@ -286,7 +286,7 @@ func (a *SimpleDASReaderAggregator) Start(ctx context.Context) { } combinedReaders[reader] = true } - a.readers = make([]arbstate.DataAvailabilityReader, 0, len(combinedUrls)) + a.readers = make([]daprovider.DASReader, 0, len(combinedUrls)) // Update reader and add newly added stats for reader := range combinedReaders { a.readers = append(a.readers, reader) @@ -350,7 +350,7 @@ func (a *SimpleDASReaderAggregator) HealthCheck(ctx context.Context) error { return nil } -func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (arbstate.ExpirationPolicy, error) { +func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) { a.readersMutex.RLock() defer a.readersMutex.RUnlock() if len(a.readers) == 0 { @@ -368,7 +368,7 @@ func (a *SimpleDASReaderAggregator) ExpirationPolicy(ctx context.Context) (arbst return -1, err } if ep != expectedExpirationPolicy { - return arbstate.MixedTimeout, nil + return daprovider.MixedTimeout, nil } } return expectedExpirationPolicy, nil diff --git a/das/storage_service.go b/das/storage_service.go index 881d6fc8b1..806e80dba5 100644 --- a/das/storage_service.go +++ b/das/storage_service.go @@ -11,13 +11,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" ) var ErrNotFound = errors.New("not found") type StorageService interface { - arbstate.DataAvailabilityReader + daprovider.DASReader Put(ctx context.Context, data []byte, expirationTime uint64) error Sync(ctx context.Context) error Closer diff --git a/das/store_signing.go b/das/store_signing.go index 8039774b65..8ebc1a9805 100644 --- a/das/store_signing.go +++ b/das/store_signing.go @@ -12,7 +12,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/signature" @@ -56,7 +56,7 @@ func NewStoreSigningDAS(inner DataAvailabilityServiceWriter, signer signature.Da return &StoreSigningDAS{inner, signer, addr}, nil } -func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*arbstate.DataAvailabilityCertificate, error) { +func (s *StoreSigningDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte) (*daprovider.DataAvailabilityCertificate, error) { log.Trace("das.StoreSigningDAS.Store(...)", "message", pretty.FirstFewBytes(message), "timeout", time.Unix(int64(timeout), 0), "sig", pretty.FirstFewBytes(sig), "this", s) mySig, err := applyDasSigner(s.signer, message, timeout) if err != nil { diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go index 91f2e522a7..868a2017ee 100644 --- a/das/syncing_fallback_storage.go +++ b/das/syncing_fallback_storage.go @@ -20,7 +20,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" 
"github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util/arbmath" @@ -94,7 +94,7 @@ type l1SyncService struct { config SyncToStorageConfig syncTo StorageService - dataSource arbstate.DataAvailabilityReader + dataSource daprovider.DASReader l1Reader *headerreader.HeaderReader inboxContract *bridgegen.SequencerInbox @@ -161,7 +161,7 @@ func writeSyncState(syncDir string, blockNr uint64) error { return os.Rename(f.Name(), path) } -func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSource arbstate.DataAvailabilityReader, l1Reader *headerreader.HeaderReader, inboxAddr common.Address) (*l1SyncService, error) { +func newl1SyncService(config *SyncToStorageConfig, syncTo StorageService, dataSource daprovider.DASReader, l1Reader *headerreader.HeaderReader, inboxAddr common.Address) (*l1SyncService, error) { l1Client := l1Reader.Client() inboxContract, err := bridgegen.NewSequencerInbox(inboxAddr, l1Client) if err != nil { @@ -213,7 +213,7 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere data = append(header, data...) preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - if _, err = arbstate.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimages, arbstate.KeysetValidate); err != nil { + if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimages, daprovider.KeysetValidate); err != nil { log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) return err } @@ -291,7 +291,7 @@ func FindDASDataFromLog( log.Warn("BatchDelivered - no data found", "data", data) return nil, nil } - if !arbstate.IsDASMessageHeaderByte(data[0]) { + if !daprovider.IsDASMessageHeaderByte(data[0]) { log.Warn("BatchDelivered - data not DAS") return nil, nil } @@ -417,7 +417,7 @@ type SyncingFallbackStorageService struct { func NewSyncingFallbackStorageService(ctx context.Context, primary StorageService, - backup arbstate.DataAvailabilityReader, + backup daprovider.DASReader, backupHealthChecker DataAvailabilityServiceHealthChecker, l1Reader *headerreader.HeaderReader, inboxAddr common.Address, diff --git a/das/util.go b/das/util.go index d98a2687fe..de266c433f 100644 --- a/das/util.go +++ b/das/util.go @@ -7,11 +7,11 @@ import ( "time" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/util/pretty" ) -func logPut(store string, data []byte, timeout uint64, reader arbstate.DataAvailabilityReader, more ...interface{}) { +func logPut(store string, data []byte, timeout uint64, reader daprovider.DASReader, more ...interface{}) { if len(more) == 0 { log.Trace( store, "message", pretty.FirstFewBytes(data), "timeout", time.Unix(int64(timeout), 0), diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 4e7aa22cbe..ecbcb840e6 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -10,7 +10,7 @@ import ( "math/big" "time" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" @@ -50,7 +50,7 @@ type L1Validator struct { wallet ValidatorWalletInterface callOpts bind.CallOpts - das arbstate.DataAvailabilityReader + das daprovider.DASReader inboxTracker InboxTrackerInterface 
txStreamer TransactionStreamerInterface blockValidator *BlockValidator @@ -62,7 +62,7 @@ func NewL1Validator( wallet ValidatorWalletInterface, validatorUtilsAddress common.Address, callOpts bind.CallOpts, - das arbstate.DataAvailabilityReader, + das daprovider.DASReader, inboxTracker InboxTrackerInterface, txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 6fdddd3390..5f553ba8e9 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -11,6 +11,7 @@ import ( "sync" "testing" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/util/rpcclient" "github.com/offchainlabs/nitro/validator/server_api" @@ -23,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/offchainlabs/nitro/arbos/arbostypes" - "github.com/offchainlabs/nitro/arbstate" ) type StatelessBlockValidator struct { @@ -38,8 +38,8 @@ type StatelessBlockValidator struct { inboxTracker InboxTrackerInterface streamer TransactionStreamerInterface db ethdb.Database - daService arbstate.DataAvailabilityReader - blobReader arbstate.BlobReader + daService daprovider.DASReader + blobReader daprovider.BlobReader moduleMutex sync.Mutex currentWasmModuleRoot common.Hash @@ -221,8 +221,8 @@ func NewStatelessBlockValidator( streamer TransactionStreamerInterface, recorder execution.ExecutionRecorder, arbdb ethdb.Database, - das arbstate.DataAvailabilityReader, - blobReader arbstate.BlobReader, + das daprovider.DASReader, + blobReader daprovider.BlobReader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -293,7 +293,7 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } - if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { + if daprovider.IsBlobHashesHeaderByte(batch.Data[40]) { payload := batch.Data[41:] if len(payload)%len(common.Hash{}) != 0 { return fmt.Errorf("blob batch data is not a list of hashes as expected") @@ -313,12 +313,12 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] } } - if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { + if daprovider.IsDASMessageHeaderByte(batch.Data[40]) { if v.daService == nil { log.Warn("No DAS configured, but sequencer message found with DAS header") } else { - _, err := arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, + _, err := daprovider.RecoverPayloadFromDasBatch( + ctx, batch.Number, batch.Data, v.daService, e.Preimages, daprovider.KeysetValidate, ) if err != nil { return err diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 68dea4167f..3ccee0e008 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -171,7 +171,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, DeployInfo: builder.L2.ConsensusNode.DeployInfo, TransactOpts: &seqTxOpts, - DAWriter: nil, + DAPWriter: nil, ParentChainID: parentChainID, }, ) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 1dbd0d81b3..a7d9e7b00d 100644 --- a/system_tests/common_test.go +++ 
b/system_tests/common_test.go @@ -18,7 +18,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/util" - "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/blsSignatures" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/cmd/genericconf" @@ -964,7 +964,7 @@ func authorizeDASKeyset( if dasSignerKey == nil { return } - keyset := &arbstate.DataAvailabilityKeyset{ + keyset := &daprovider.DataAvailabilityKeyset{ AssumedHonest: 1, PubKeys: []blsSignatures.PublicKey{*dasSignerKey}, } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index 1b29dca4b9..5cbf934d82 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbos/l2pricing" "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/statetransfer" ) @@ -41,7 +42,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, daprovider.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -173,7 +174,7 @@ func FuzzStateTransition(f *testing.F) { binary.BigEndian.PutUint64(seqBatch[24:32], ^uint64(0)) binary.BigEndian.PutUint64(seqBatch[32:40], uint64(len(delayedMessages))) if compressSeqMsg { - seqBatch = append(seqBatch, arbstate.BrotliMessageHeaderByte) + seqBatch = append(seqBatch, daprovider.BrotliMessageHeaderByte) seqMsgCompressed, err := arbcompress.CompressLevel(seqMsg, 0) if err != nil { panic(fmt.Sprintf("failed to compress sequencer message: %v", err)) From 6f08d8d13fc9bc6f99cec9312c80c24597fb0c14 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 14 Mar 2024 15:39:30 -0500 Subject: [PATCH 02/40] code refactor --- arbstate/inbox.go | 1 - 1 file changed, 1 deletion(-) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 6c85c9facf..7c3276d9a0 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -48,7 +48,6 @@ type sequencerMessage struct { const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 -const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { From 8c256b75320c51bc22f0fe45d952bf503cb3e19f Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Wed, 20 Mar 2024 10:50:55 -0500 Subject: [PATCH 03/40] code refactor --- arbnode/delayed_seq_reorg_test.go | 2 +- arbnode/inbox_tracker.go | 21 ++++----------------- arbnode/node.go | 13 ++++++++++++- cmd/pruning/pruning.go | 2 +- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index beb2656e2b..9ad984ae6c 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer 
cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil, nil) + tracker, err := NewInboxTracker(db, streamer, nil) Require(t, err) err = streamer.Start(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index a20807b82f..a8023585df 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -38,23 +38,17 @@ type InboxTracker struct { txStreamer *TransactionStreamer mutex sync.Mutex validator *staker.BlockValidator - das daprovider.DASReader - blobReader daprovider.BlobReader + dapReaders []daprovider.Reader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das daprovider.DASReader, blobReader daprovider.BlobReader) (*InboxTracker, error) { - // We support a nil txStreamer for the pruning code - if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { - return nil, errors.New("data availability service required but unconfigured") - } +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, dapReaders []daprovider.Reader) (*InboxTracker, error) { tracker := &InboxTracker{ db: db, txStreamer: txStreamer, - das: das, - blobReader: blobReader, + dapReaders: dapReaders, batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil @@ -607,14 +601,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - var daProviders []daprovider.Reader - if t.das != nil { - daProviders = append(daProviders, daprovider.NewReaderForDAS(t.das)) - } - if t.blobReader != nil { - daProviders = append(daProviders, daprovider.NewReaderForBlobReader(t.blobReader)) - } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, daProviders, daprovider.KeysetValidate) + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.dapReaders, daprovider.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index ca09aac57f..6666591ebc 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -535,7 +535,18 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) + // We support a nil txStreamer for the pruning code + if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil { + return nil, errors.New("data availability service required but unconfigured") + } + var dapReaders []daprovider.Reader + if daReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader)) + } + if blobReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) + } + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, dapReaders) if err != nil { return nil, err } diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index da015ac52c..68d89302f0 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := 
arbnode.NewInboxTracker(arbDb, nil, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) if err != nil { return nil, err } From 26f3b8014133e52ccd8a07daaa2a2ceaaa314979 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 4 Apr 2024 16:56:01 -0500 Subject: [PATCH 04/40] address PR comments --- arbstate/daprovider/reader.go | 140 ++++++++++++++++++++++++++++++++-- arbstate/daprovider/util.go | 1 + arbstate/inbox.go | 14 +++- 3 files changed, 146 insertions(+), 9 deletions(-) diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go index b37d18420a..d6f1a7f618 100644 --- a/arbstate/daprovider/reader.go +++ b/arbstate/daprovider/reader.go @@ -4,12 +4,16 @@ package daprovider import ( + "bytes" "context" + "encoding/binary" "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/blobs" ) @@ -23,11 +27,19 @@ type Reader interface { batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, ) ([]byte, error) + + // RecordPreimagesTo takes in preimages map and returns a function that can be used + // In recording (hash,preimage) key value pairs into preimages map, when fetching payload + RecordPreimagesTo( + preimages map[arbutil.PreimageType]map[common.Hash][]byte, + ) PreimageRecorder } +type PreimageRecorder func(key common.Hash, value []byte) + // NewReaderForDAS is generally meant to be only used by nitro. // DA Providers should implement methods in the Reader interface independently func NewReaderForDAS(dasReader DASReader) *readerForDAS { @@ -42,15 +54,109 @@ func (d *readerForDAS) IsValidHeaderByte(headerByte byte) bool { return IsDASMessageHeaderByte(headerByte) } +func (d *readerForDAS) RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder { + if preimages == nil { + return nil + } + if preimages[arbutil.Keccak256PreimageType] == nil { + preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) + } + return func(key common.Hash, value []byte) { + preimages[arbutil.Keccak256PreimageType][key] = value + } +} + func (d *readerForDAS) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, ) ([]byte, error) { - return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, preimages, keysetValidationMode) + cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) + if err != nil { + log.Error("Failed to deserialize DAS message", "err", err) + return nil, nil + } + version := cert.Version + + if version >= 2 { + log.Error("Your node software is probably out of date", "certificateVersion", version) + return nil, nil + } + + getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) { + newHash := hash + if version == 0 { + newHash = dastree.FlatHashToTreeHash(hash) + } + + preimage, err := d.dasReader.GetByHash(ctx, newHash) + if err != nil && hash != newHash { + log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err) + preimage, err = 
d.dasReader.GetByHash(ctx, hash) + } + if err != nil { + return nil, err + } + + switch { + case version == 0 && crypto.Keccak256Hash(preimage) != hash: + fallthrough + case version == 1 && dastree.Hash(preimage) != hash: + log.Error( + "preimage mismatch for hash", + "hash", hash, "err", ErrHashMismatch, "version", version, + ) + return nil, ErrHashMismatch + } + return preimage, nil + } + + keysetPreimage, err := getByHash(ctx, cert.KeysetHash) + if err != nil { + log.Error("Couldn't get keyset", "err", err) + return nil, err + } + if preimageRecorder != nil { + dastree.RecordHash(preimageRecorder, keysetPreimage) + } + + keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), !validateSeqMsg) + if err != nil { + return nil, fmt.Errorf("%w. Couldn't deserialize keyset, err: %w, keyset hash: %x batch num: %d", ErrSeqMsgValidation, err, cert.KeysetHash, batchNum) + } + err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig) + if err != nil { + log.Error("Bad signature on DAS batch", "err", err) + return nil, nil + } + + maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16]) + if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert { + log.Error("Data availability cert expires too soon", "err", "") + return nil, nil + } + + dataHash := cert.DataHash + payload, err := getByHash(ctx, dataHash) + if err != nil { + log.Error("Couldn't fetch DAS batch contents", "err", err) + return nil, err + } + + if preimageRecorder != nil { + if version == 0 { + treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) + preimageRecorder(dataHash, payload) + preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf) + } else { + dastree.RecordHash(preimageRecorder, payload) + } + } + + return payload, nil } // NewReaderForBlobReader is generally meant to be only used by nitro. 
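// Illustration, not part of the patch: a minimal sketch of how a caller is
// expected to drive the Reader interface above, mirroring the
// parseSequencerMessage change later in this commit. recoverPayload and its
// parameters are hypothetical names; only the daprovider identifiers come
// from this series, and the context/common imports used throughout this
// patch are assumed.
func recoverPayload(ctx context.Context, batchNum uint64, batchBlockHash common.Hash,
	data []byte, readers []daprovider.Reader) ([]byte, error) {
	payload := data[40:] // the first 40 bytes are the sequencer message header
	if len(payload) == 0 {
		return payload, nil
	}
	for _, r := range readers {
		if r != nil && r.IsValidHeaderByte(payload[0]) {
			// nil recorder: skip preimage collection; true: validate the keyset
			return r.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, true)
		}
	}
	// no provider claimed the header byte: treat as a plain (e.g. brotli) batch
	return payload, nil
}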
@@ -67,13 +173,25 @@ func (b *readerForBlobReader) IsValidHeaderByte(headerByte byte) bool { return IsBlobHashesHeaderByte(headerByte) } +func (b *readerForBlobReader) RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder { + if preimages == nil { + return nil + } + if preimages[arbutil.EthVersionedHashPreimageType] == nil { + preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) + } + return func(key common.Hash, value []byte) { + preimages[arbutil.EthVersionedHashPreimageType][key] = value + } +} + func (b *readerForBlobReader) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, batchBlockHash common.Hash, sequencerMsg []byte, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, ) ([]byte, error) { blobHashes := sequencerMsg[41:] if len(blobHashes)%len(common.Hash{}) != 0 { @@ -87,6 +205,14 @@ func (b *readerForBlobReader) RecoverPayloadFromBatch( if err != nil { return nil, fmt.Errorf("failed to get blobs: %w", err) } + if preimageRecorder != nil { + for i, blob := range kzgBlobs { + // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable + // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview + b := blob + preimageRecorder(versionedHashes[i], b[:]) + } + } payload, err := blobs.DecodeBlobs(kzgBlobs) if err != nil { log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) diff --git a/arbstate/daprovider/util.go b/arbstate/daprovider/util.go index acf81f25fd..0c7909d358 100644 --- a/arbstate/daprovider/util.go +++ b/arbstate/daprovider/util.go @@ -106,6 +106,7 @@ var ( ErrBatchToDasFailed = errors.New("unable to batch to DAS") ErrNoBlobReader = errors.New("blob batch payload was encountered but no BlobReader was configured") ErrInvalidBlobDataFormat = errors.New("blob batch data is not a list of hashes as expected") + ErrSeqMsgValidation = errors.New("error validating recovered payload from batch") ) type KeysetValidationMode uint8 diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 7c3276d9a0..410ddd41b3 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -80,9 +80,19 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash var err error for _, provider := range daProviders { if provider != nil && provider.IsValidHeaderByte(payload[0]) { - payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode) + payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate) if err != nil { - return nil, err + // Matches the way keyset validation was done inside DAS readers i.e logging the error + // But other daproviders might just want to return the error + if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(payload[0]) { + logLevel := log.Error + if keysetValidationMode == daprovider.KeysetPanicIfInvalid { + logLevel = log.Crit + } + logLevel(err.Error()) + } else { + return nil, err + } } if payload == nil { return parsedMsg, nil From d3ffedcf552f6066b5a5319089ecb689f4955cce Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 5 Apr 2024 10:33:02 -0500 Subject: [PATCH 05/40] address PR comments --- arbstate/daprovider/reader.go | 38 +++-------------------------------- 
arbstate/daprovider/util.go | 18 ++++++++++++++++- das/dastree/dastree.go | 7 ++++--- das/dastree/dastree_test.go | 3 ++- das/ipfs_storage_service.go | 3 ++- 5 files changed, 28 insertions(+), 41 deletions(-) diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go index d6f1a7f618..6ddb172814 100644 --- a/arbstate/daprovider/reader.go +++ b/arbstate/daprovider/reader.go @@ -30,16 +30,8 @@ type Reader interface { preimageRecorder PreimageRecorder, validateSeqMsg bool, ) ([]byte, error) - - // RecordPreimagesTo takes in preimages map and returns a function that can be used - // In recording (hash,preimage) key value pairs into preimages map, when fetching payload - RecordPreimagesTo( - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - ) PreimageRecorder } -type PreimageRecorder func(key common.Hash, value []byte) - // NewReaderForDAS is generally meant to be only used by nitro. // DA Providers should implement methods in the Reader interface independently func NewReaderForDAS(dasReader DASReader) *readerForDAS { @@ -54,18 +46,6 @@ func (d *readerForDAS) IsValidHeaderByte(headerByte byte) bool { return IsDASMessageHeaderByte(headerByte) } -func (d *readerForDAS) RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder { - if preimages == nil { - return nil - } - if preimages[arbutil.Keccak256PreimageType] == nil { - preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - return func(key common.Hash, value []byte) { - preimages[arbutil.Keccak256PreimageType][key] = value - } -} - func (d *readerForDAS) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, @@ -149,8 +129,8 @@ func (d *readerForDAS) RecoverPayloadFromBatch( if preimageRecorder != nil { if version == 0 { treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) - preimageRecorder(dataHash, payload) - preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf) + preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType) + preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf, arbutil.Keccak256PreimageType) } else { dastree.RecordHash(preimageRecorder, payload) } @@ -173,18 +153,6 @@ func (b *readerForBlobReader) IsValidHeaderByte(headerByte byte) bool { return IsBlobHashesHeaderByte(headerByte) } -func (b *readerForBlobReader) RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder { - if preimages == nil { - return nil - } - if preimages[arbutil.EthVersionedHashPreimageType] == nil { - preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) - } - return func(key common.Hash, value []byte) { - preimages[arbutil.EthVersionedHashPreimageType][key] = value - } -} - func (b *readerForBlobReader) RecoverPayloadFromBatch( ctx context.Context, batchNum uint64, @@ -210,7 +178,7 @@ func (b *readerForBlobReader) RecoverPayloadFromBatch( // Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable // Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview b := blob - preimageRecorder(versionedHashes[i], b[:]) + preimageRecorder(versionedHashes[i], b[:], arbutil.EthVersionedHashPreimageType) } } payload, err := blobs.DecodeBlobs(kzgBlobs) diff --git a/arbstate/daprovider/util.go b/arbstate/daprovider/util.go index 0c7909d358..861644ea33 100644 --- a/arbstate/daprovider/util.go +++ b/arbstate/daprovider/util.go @@ -43,6 +43,22 @@ type BlobReader interface { Initialize(ctx context.Context) error } +type PreimageRecorder func(key 
common.Hash, value []byte, ty arbutil.PreimageType)
+
+// RecordPreimagesTo takes in a preimages map and returns a function that can be used
+// in recording (hash, preimage) key-value pairs into the preimages map when fetching a payload through RecoverPayloadFromBatch
+func RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte) PreimageRecorder {
+	if preimages == nil {
+		return nil
+	}
+	return func(key common.Hash, value []byte, ty arbutil.PreimageType) {
+		if preimages[ty] == nil {
+			preimages[ty] = make(map[common.Hash][]byte)
+		}
+		preimages[ty][key] = value
+	}
+}
+
 // DASMessageHeaderFlag indicates that this data is a certificate for the data availability service,
 // which will retrieve the full batch data.
 const DASMessageHeaderFlag byte = 0x80
@@ -136,7 +152,7 @@ func RecoverPayloadFromDasBatch(
 		return nil, nil
 	}
 	version := cert.Version
-	recordPreimage := func(key common.Hash, value []byte) {
+	recordPreimage := func(key common.Hash, value []byte, ty arbutil.PreimageType) {
 		keccakPreimages[key] = value
 	}
 
diff --git a/das/dastree/dastree.go b/das/dastree/dastree.go
index bc325a3200..d873f0568d 100644
--- a/das/dastree/dastree.go
+++ b/das/dastree/dastree.go
@@ -9,6 +9,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/offchainlabs/nitro/arbutil"
 	"github.com/offchainlabs/nitro/util/arbmath"
 )
@@ -26,7 +27,7 @@ type node struct {
 // RecordHash chunks the preimage into 64kB bins and generates a recursive hash tree,
 // calling the caller-supplied record function for each hash/preimage pair created in
 // building the tree structure.
-func RecordHash(record func(bytes32, []byte), preimage ...[]byte) bytes32 {
+func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...[]byte) bytes32 {
 	// Algorithm
 	//  1. split the preimage into 64kB bins and double hash them to produce the tree's leaves
 	//  2. repeatedly hash pairs and their combined length, bubbling up any odd-one's out, to form the root
@@ -48,7 +49,7 @@ func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...[]byte) bytes32 {
 	keccord := func(value []byte) bytes32 {
 		hash := crypto.Keccak256Hash(value)
-		record(hash, value)
+		record(hash, value, arbutil.Keccak256PreimageType)
 		return hash
 	}
 	prepend := func(before byte, slice []byte) []byte {
@@ -94,7 +95,7 @@ func RecordHash(record func(bytes32, []byte, arbutil.PreimageType), preimage ...[]byte) bytes32 {
 
 func Hash(preimage ...[]byte) bytes32 {
 	// Merkelizes without recording anything. All but the validator's DAS will call this
-	return RecordHash(func(bytes32, []byte) {}, preimage...)
+	return RecordHash(func(bytes32, []byte, arbutil.PreimageType) {}, preimage...)
} func HashBytes(preimage ...[]byte) []byte { diff --git a/das/dastree/dastree_test.go b/das/dastree/dastree_test.go index 33f729f4f3..4d24c9ae98 100644 --- a/das/dastree/dastree_test.go +++ b/das/dastree/dastree_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/crypto" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/colors" "github.com/offchainlabs/nitro/util/pretty" "github.com/offchainlabs/nitro/util/testhelpers" @@ -25,7 +26,7 @@ func TestDASTree(t *testing.T) { tests = append(tests, large) } - record := func(key bytes32, value []byte) { + record := func(key bytes32, value []byte, ty arbutil.PreimageType) { colors.PrintGrey("storing ", key, " ", pretty.PrettyBytes(value)) store[key] = value if crypto.Keccak256Hash(value) != key { diff --git a/das/ipfs_storage_service.go b/das/ipfs_storage_service.go index fa15fc7971..a66db09288 100644 --- a/das/ipfs_storage_service.go +++ b/das/ipfs_storage_service.go @@ -23,6 +23,7 @@ import ( "github.com/ipfs/interface-go-ipfs-core/path" "github.com/multiformats/go-multihash" "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/ipfshelper" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/pretty" @@ -180,7 +181,7 @@ func (s *IpfsStorageService) Put(ctx context.Context, data []byte, timeout uint6 var chunks [][]byte - record := func(_ common.Hash, value []byte) { + record := func(_ common.Hash, value []byte, ty arbutil.PreimageType) { chunks = append(chunks, value) } From 3e1c15b9e517a83fb6b2081224d6ec8231c184e0 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 5 Apr 2024 15:03:56 -0500 Subject: [PATCH 06/40] address PR comments and use reader interface in StatelessBlockValidator --- arbnode/node.go | 10 ++- arbstate/daprovider/reader.go | 88 +----------------------- arbstate/daprovider/util.go | 35 +++------- arbstate/inbox.go | 18 ++--- das/syncing_fallback_storage.go | 11 ++- staker/l1_validator.go | 4 -- staker/staker.go | 2 +- staker/stateless_block_validator.go | 59 ++++++---------- system_tests/full_challenge_impl_test.go | 4 +- system_tests/staker_test.go | 2 - 10 files changed, 61 insertions(+), 172 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 6532c1fcf2..2cd92d53fa 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -552,14 +552,20 @@ func createNodeImpl( var statelessBlockValidator *staker.StatelessBlockValidator if config.BlockValidator.ValidationServerConfigs[0].URL != "" { + var dapReaders []daprovider.Reader + if daReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader)) + } + if blobReader != nil { + dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) + } statelessBlockValidator, err = staker.NewStatelessBlockValidator( inboxReader, inboxTracker, txStreamer, exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), - daReader, - blobReader, + dapReaders, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) diff --git a/arbstate/daprovider/reader.go b/arbstate/daprovider/reader.go index 6ddb172814..560af3af1d 100644 --- a/arbstate/daprovider/reader.go +++ b/arbstate/daprovider/reader.go @@ -4,16 +4,12 @@ package daprovider import ( - "bytes" "context" - "encoding/binary" "fmt" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" - 
"github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/util/blobs" ) @@ -54,89 +50,7 @@ func (d *readerForDAS) RecoverPayloadFromBatch( preimageRecorder PreimageRecorder, validateSeqMsg bool, ) ([]byte, error) { - cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) - if err != nil { - log.Error("Failed to deserialize DAS message", "err", err) - return nil, nil - } - version := cert.Version - - if version >= 2 { - log.Error("Your node software is probably out of date", "certificateVersion", version) - return nil, nil - } - - getByHash := func(ctx context.Context, hash common.Hash) ([]byte, error) { - newHash := hash - if version == 0 { - newHash = dastree.FlatHashToTreeHash(hash) - } - - preimage, err := d.dasReader.GetByHash(ctx, newHash) - if err != nil && hash != newHash { - log.Debug("error fetching new style hash, trying old", "new", newHash, "old", hash, "err", err) - preimage, err = d.dasReader.GetByHash(ctx, hash) - } - if err != nil { - return nil, err - } - - switch { - case version == 0 && crypto.Keccak256Hash(preimage) != hash: - fallthrough - case version == 1 && dastree.Hash(preimage) != hash: - log.Error( - "preimage mismatch for hash", - "hash", hash, "err", ErrHashMismatch, "version", version, - ) - return nil, ErrHashMismatch - } - return preimage, nil - } - - keysetPreimage, err := getByHash(ctx, cert.KeysetHash) - if err != nil { - log.Error("Couldn't get keyset", "err", err) - return nil, err - } - if preimageRecorder != nil { - dastree.RecordHash(preimageRecorder, keysetPreimage) - } - - keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), !validateSeqMsg) - if err != nil { - return nil, fmt.Errorf("%w. Couldn't deserialize keyset, err: %w, keyset hash: %x batch num: %d", ErrSeqMsgValidation, err, cert.KeysetHash, batchNum) - } - err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig) - if err != nil { - log.Error("Bad signature on DAS batch", "err", err) - return nil, nil - } - - maxTimestamp := binary.BigEndian.Uint64(sequencerMsg[8:16]) - if cert.Timeout < maxTimestamp+MinLifetimeSecondsForDataAvailabilityCert { - log.Error("Data availability cert expires too soon", "err", "") - return nil, nil - } - - dataHash := cert.DataHash - payload, err := getByHash(ctx, dataHash) - if err != nil { - log.Error("Couldn't fetch DAS batch contents", "err", err) - return nil, err - } - - if preimageRecorder != nil { - if version == 0 { - treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) - preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType) - preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf, arbutil.Keccak256PreimageType) - } else { - dastree.RecordHash(preimageRecorder, payload) - } - } - - return payload, nil + return RecoverPayloadFromDasBatch(ctx, batchNum, sequencerMsg, d.dasReader, preimageRecorder, validateSeqMsg) } // NewReaderForBlobReader is generally meant to be only used by nitro. 
diff --git a/arbstate/daprovider/util.go b/arbstate/daprovider/util.go index 861644ea33..6a7d27ab08 100644 --- a/arbstate/daprovider/util.go +++ b/arbstate/daprovider/util.go @@ -136,25 +136,15 @@ func RecoverPayloadFromDasBatch( batchNum uint64, sequencerMsg []byte, dasReader DASReader, - preimages map[arbutil.PreimageType]map[common.Hash][]byte, - keysetValidationMode KeysetValidationMode, + preimageRecorder PreimageRecorder, + validateSeqMsg bool, ) ([]byte, error) { - var keccakPreimages map[common.Hash][]byte - if preimages != nil { - if preimages[arbutil.Keccak256PreimageType] == nil { - preimages[arbutil.Keccak256PreimageType] = make(map[common.Hash][]byte) - } - keccakPreimages = preimages[arbutil.Keccak256PreimageType] - } cert, err := DeserializeDASCertFrom(bytes.NewReader(sequencerMsg[40:])) if err != nil { log.Error("Failed to deserialize DAS message", "err", err) return nil, nil } version := cert.Version - recordPreimage := func(key common.Hash, value []byte, ty arbutil.PreimageType) { - keccakPreimages[key] = value - } if version >= 2 { log.Error("Your node software is probably out of date", "certificateVersion", version) @@ -194,18 +184,13 @@ func RecoverPayloadFromDasBatch( log.Error("Couldn't get keyset", "err", err) return nil, err } - if keccakPreimages != nil { - dastree.RecordHash(recordPreimage, keysetPreimage) + if preimageRecorder != nil { + dastree.RecordHash(preimageRecorder, keysetPreimage) } - keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), keysetValidationMode == KeysetDontValidate) + keyset, err := DeserializeKeyset(bytes.NewReader(keysetPreimage), !validateSeqMsg) if err != nil { - logLevel := log.Error - if keysetValidationMode == KeysetPanicIfInvalid { - logLevel = log.Crit - } - logLevel("Couldn't deserialize keyset", "err", err, "keysetHash", cert.KeysetHash, "batchNum", batchNum) - return nil, nil + return nil, fmt.Errorf("%w. 
Couldn't deserialize keyset, err: %w, keyset hash: %x batch num: %d", ErrSeqMsgValidation, err, cert.KeysetHash, batchNum) } err = keyset.VerifySignature(cert.SignersMask, cert.SerializeSignableFields(), cert.Sig) if err != nil { @@ -226,13 +211,13 @@ func RecoverPayloadFromDasBatch( return nil, err } - if keccakPreimages != nil { + if preimageRecorder != nil { if version == 0 { treeLeaf := dastree.FlatHashToTreeLeaf(dataHash) - keccakPreimages[dataHash] = payload - keccakPreimages[crypto.Keccak256Hash(treeLeaf)] = treeLeaf + preimageRecorder(dataHash, payload, arbutil.Keccak256PreimageType) + preimageRecorder(crypto.Keccak256Hash(treeLeaf), treeLeaf, arbutil.Keccak256PreimageType) } else { - dastree.RecordHash(recordPreimage, payload) + dastree.RecordHash(preimageRecorder, payload) } } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 410ddd41b3..753ca19cd6 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -49,7 +49,7 @@ const MaxDecompressedLen int = 1024 * 1024 * 16 // 16 MiB const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 -func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, daProviders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -74,13 +74,13 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Stage 1: Extract the payload from any data availability header. // It's important that multiple DAS strategies can't both be invoked in the same batch, // as these headers are validated by the sequencer inbox and not other DASs. 
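	// (Each reader recognizes only its own header byte — IsDASMessageHeaderByte
	// for DAS certificates, IsBlobHashesHeaderByte for EIP-4844 blob batches —
	// so scanning the readers in order selects at most one of them per batch.)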
-	// We try to extract payload from the first occurring valid DA provider in the daProviders list
+	// We try to extract payload from the first occurring valid DA reader in the dapReaders list
 	if len(payload) > 0 {
 		foundDA := false
 		var err error
-		for _, provider := range daProviders {
-			if provider != nil && provider.IsValidHeaderByte(payload[0]) {
-				payload, err = provider.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate)
+		for _, dapReader := range dapReaders {
+			if dapReader != nil && dapReader.IsValidHeaderByte(payload[0]) {
+				payload, err = dapReader.RecoverPayloadFromBatch(ctx, batchNum, batchBlockHash, data, nil, keysetValidationMode != daprovider.KeysetDontValidate)
 				if err != nil {
 					// Matches the way keyset validation was done inside DAS readers i.e. logging the error
 					// But other daproviders might just want to return the error
@@ -164,7 +164,7 @@ type inboxMultiplexer struct {
 	backend InboxBackend
 	delayedMessagesRead uint64
-	daProviders []daprovider.Reader
+	dapReaders []daprovider.Reader
 	cachedSequencerMessage *sequencerMessage
 	cachedSequencerMessageNum uint64
 	cachedSegmentNum uint64
@@ -174,11 +174,11 @@ type inboxMultiplexer struct {
 	keysetValidationMode daprovider.KeysetValidationMode
 }

-func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, daProviders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer {
+func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dapReaders []daprovider.Reader, keysetValidationMode daprovider.KeysetValidationMode) arbostypes.InboxMultiplexer {
 	return &inboxMultiplexer{
 		backend: backend,
 		delayedMessagesRead: delayedMessagesRead,
-		daProviders: daProviders,
+		dapReaders: dapReaders,
 		keysetValidationMode: keysetValidationMode,
 	}
 }
@@ -200,7 +200,7 @@ func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMeta
 	}
 	r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition()
 	var err error
-	r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.daProviders, r.keysetValidationMode)
+	r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dapReaders, r.keysetValidationMode)
 	if err != nil {
 		return nil, err
 	}
diff --git a/das/syncing_fallback_storage.go b/das/syncing_fallback_storage.go
index 372671defd..411e7a1977 100644
--- a/das/syncing_fallback_storage.go
+++ b/das/syncing_fallback_storage.go
@@ -213,9 +213,14 @@ func (s *l1SyncService) processBatchDelivered(ctx context.Context, batchDelivere
 	data = append(header, data...)
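	// The recovered payload is discarded here; recovery runs for its side effect
	// of recording the keyset and batch-data preimages, which the loop below then
	// copies into this sync service's storage.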
preimages := make(map[arbutil.PreimageType]map[common.Hash][]byte) - if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimages, daprovider.KeysetValidate); err != nil { - log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) - return err + preimageRecorder := daprovider.RecordPreimagesTo(preimages) + if _, err = daprovider.RecoverPayloadFromDasBatch(ctx, deliveredEvent.BatchSequenceNumber.Uint64(), data, s.dataSource, preimageRecorder, true); err != nil { + if errors.Is(err, daprovider.ErrSeqMsgValidation) { + log.Error(err.Error()) + } else { + log.Error("recover payload failed", "txhash", batchDeliveredLog.TxHash, "data", data) + return err + } } for _, preimages := range preimages { for hash, contents := range preimages { diff --git a/staker/l1_validator.go b/staker/l1_validator.go index 882db2ee98..deaf4dc2dc 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -10,7 +10,6 @@ import ( "math/big" "time" - "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" "github.com/offchainlabs/nitro/validator" @@ -50,7 +49,6 @@ type L1Validator struct { wallet ValidatorWalletInterface callOpts bind.CallOpts - das daprovider.DASReader inboxTracker InboxTrackerInterface txStreamer TransactionStreamerInterface blockValidator *BlockValidator @@ -62,7 +60,6 @@ func NewL1Validator( wallet ValidatorWalletInterface, validatorUtilsAddress common.Address, callOpts bind.CallOpts, - das daprovider.DASReader, inboxTracker InboxTrackerInterface, txStreamer TransactionStreamerInterface, blockValidator *BlockValidator, @@ -90,7 +87,6 @@ func NewL1Validator( builder: builder, wallet: wallet, callOpts: callOpts, - das: das, inboxTracker: inboxTracker, txStreamer: txStreamer, blockValidator: blockValidator, diff --git a/staker/staker.go b/staker/staker.go index 2a95e9c9f7..da6413e122 100644 --- a/staker/staker.go +++ b/staker/staker.go @@ -291,7 +291,7 @@ func NewStaker( } client := l1Reader.Client() val, err := NewL1Validator(client, wallet, validatorUtilsAddress, callOpts, - statelessBlockValidator.daService, statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) + statelessBlockValidator.inboxTracker, statelessBlockValidator.streamer, blockValidator) if err != nil { return nil, err } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 34dfdd5401..8c7e0cf191 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -38,8 +38,7 @@ type StatelessBlockValidator struct { inboxTracker InboxTrackerInterface streamer TransactionStreamerInterface db ethdb.Database - daService daprovider.DASReader - blobReader daprovider.BlobReader + dapReaders []daprovider.Reader moduleMutex sync.Mutex currentWasmModuleRoot common.Hash @@ -189,8 +188,7 @@ func NewStatelessBlockValidator( streamer TransactionStreamerInterface, recorder execution.ExecutionRecorder, arbdb ethdb.Database, - das daprovider.DASReader, - blobReader daprovider.BlobReader, + dapReaders []daprovider.Reader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -210,8 +208,7 @@ func NewStatelessBlockValidator( inboxTracker: inbox, streamer: streamer, db: arbdb, - daService: das, - blobReader: blobReader, + dapReaders: dapReaders, } return validator, nil } @@ -261,39 +258,27 @@ func (v *StatelessBlockValidator) 
ValidationEntryRecord(ctx context.Context, e *
 		if len(batch.Data) <= 40 {
 			continue
 		}
-		if daprovider.IsBlobHashesHeaderByte(batch.Data[40]) {
-			payload := batch.Data[41:]
-			if len(payload)%len(common.Hash{}) != 0 {
-				return fmt.Errorf("blob batch data is not a list of hashes as expected")
-			}
-			versionedHashes := make([]common.Hash, len(payload)/len(common.Hash{}))
-			for i := 0; i*32 < len(payload); i += 1 {
-				copy(versionedHashes[i][:], payload[i*32:(i+1)*32])
-			}
-			blobs, err := v.blobReader.GetBlobs(ctx, batch.BlockHash, versionedHashes)
-			if err != nil {
-				return fmt.Errorf("failed to get blobs: %w", err)
-			}
-			if e.Preimages[arbutil.EthVersionedHashPreimageType] == nil {
-				e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte)
-			}
-			for i, blob := range blobs {
-				// Prevent aliasing `blob` when slicing it, as for range loops overwrite the same variable
-				// Won't be necessary after Go 1.22 with https://go.dev/blog/loopvar-preview
-				b := blob
-				e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = b[:]
-			}
-		}
-		if daprovider.IsDASMessageHeaderByte(batch.Data[40]) {
-			if v.daService == nil {
-				log.Warn("No DAS configured, but sequencer message found with DAS header")
-			} else {
-				_, err := daprovider.RecoverPayloadFromDasBatch(
-					ctx, batch.Number, batch.Data, v.daService, e.Preimages, daprovider.KeysetValidate,
-				)
+		foundDA := false
+		for _, dapReader := range v.dapReaders {
+			if dapReader != nil && dapReader.IsValidHeaderByte(batch.Data[40]) {
+				recorder := daprovider.RecordPreimagesTo(e.Preimages)
+				_, err := dapReader.RecoverPayloadFromBatch(ctx, batch.Number, batch.BlockHash, batch.Data, recorder, true)
 				if err != nil {
-					return err
+					// Matches the way keyset validation was done inside DAS readers i.e. logging the error
+					// But other daproviders might just want to return the error
+					if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(batch.Data[40]) {
+						log.Error(err.Error())
+					} else {
+						return err
+					}
 				}
+				foundDA = true
+				break
+			}
+		}
+		if !foundDA {
+			if daprovider.IsDASMessageHeaderByte(batch.Data[40]) {
+				log.Error("No DAS Reader configured, but sequencer message found with DAS header")
 			}
 		}
 	}
diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go
index 03b6d690f1..2f36ff6142 100644
--- a/system_tests/full_challenge_impl_test.go
+++ b/system_tests/full_challenge_impl_test.go
@@ -389,7 +389,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 	confirmLatestBlock(ctx, t, l1Info, l1Backend)

-	asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
+	asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2.ArbDB, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
 	if err != nil {
 		Fatal(t, err)
 	}
@@ -406,7 +406,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall
 	if err != nil {
 		Fatal(t, err)
 	}
-	challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2.ArbDB, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
+	challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2.ArbDB, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack)
 	if err != nil {
 		Fatal(t, err)
 	}
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index d5bbeaa079..2d188295ec 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -208,7 +208,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		execNodeA,
 		l2nodeA.ArbDB,
 		nil,
-		nil,
 		StaticFetcherFrom(t, &blockValidatorConfig),
 		valStack,
 	)
@@ -261,7 +260,6 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
 		execNodeB,
 		l2nodeB.ArbDB,
 		nil,
-		nil,
 		StaticFetcherFrom(t, &blockValidatorConfig),
 		valStack,
 	)

From 00193d032983a35ae9f96b04f05265a455c98fb7 Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Fri, 5 Apr 2024 15:22:44 -0500
Subject: [PATCH 07/40] code refactor

---
 arbstate/daprovider/util.go         | 2 ++
 arbstate/daprovider/writer.go       | 2 +-
 staker/stateless_block_validator.go | 4 ++--
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/arbstate/daprovider/util.go b/arbstate/daprovider/util.go
index 6a7d27ab08..054bde5503 100644
--- a/arbstate/daprovider/util.go
+++ b/arbstate/daprovider/util.go
@@ -43,6 +43,8 @@ type BlobReader interface {
 	Initialize(ctx context.Context) error
 }

+// PreimageRecorder is used to add a (key, value) pair to the inner map selected by ty within the larger preimages map.
+// If ty doesn't exist as a key in the preimages map, it is initialized to map[common.Hash][]byte before the (key, value) pair is added
 type PreimageRecorder func(key common.Hash, value []byte, ty arbutil.PreimageType)

diff --git a/arbstate/daprovider/writer.go b/arbstate/daprovider/writer.go
index 44c53fb87d..75b356c4b8 100644
--- a/arbstate/daprovider/writer.go
+++ b/arbstate/daprovider/writer.go
@@ -33,7 +33,7 @@ type writerForDAS struct {
 }

 func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64, sig []byte, disableFallbackStoreDataOnChain bool) ([]byte, error) {
-	cert, err := d.dasWriter.Store(ctx, message, timeout, []byte{}) // b.daWriter will append signature if enabled
+	cert, err := d.dasWriter.Store(ctx, message, timeout, []byte{})
 	if errors.Is(err, ErrBatchToDasFailed) {
 		if disableFallbackStoreDataOnChain {
 			return nil, errors.New("unable to batch to DAS and fallback storing data on chain is disabled")
 		}
diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go
index 8c7e0cf191..edc8bb97dd 100644
--- a/staker/stateless_block_validator.go
+++ b/staker/stateless_block_validator.go
@@ -261,8 +261,8 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e *
 		foundDA := false
 		for _, dapReader := range v.dapReaders {
 			if dapReader != nil && dapReader.IsValidHeaderByte(batch.Data[40]) {
-				recorder := daprovider.RecordPreimagesTo(e.Preimages)
-				_, err := dapReader.RecoverPayloadFromBatch(ctx, batch.Number, batch.BlockHash, batch.Data, recorder, true)
+				preimageRecorder := daprovider.RecordPreimagesTo(e.Preimages)
+				_, err := dapReader.RecoverPayloadFromBatch(ctx, batch.Number, batch.BlockHash, batch.Data, preimageRecorder, true)
 				if err != nil {
 					// Matches the way keyset validation was done inside DAS readers i.e. logging the error
 					// But other daproviders might just want to return the error
From
bae6788d56cc113f5e54b00f864ae91b9dc245fa Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Tue, 9 Apr 2024 12:43:58 -0500 Subject: [PATCH 08/40] code cleanup --- arbnode/node.go | 7 ------- cmd/replay/main.go | 8 ++++---- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 2cd92d53fa..21f3a6fd85 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -552,13 +552,6 @@ func createNodeImpl( var statelessBlockValidator *staker.StatelessBlockValidator if config.BlockValidator.ValidationServerConfigs[0].URL != "" { - var dapReaders []daprovider.Reader - if daReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForDAS(daReader)) - } - if blobReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) - } statelessBlockValidator, err = staker.NewStatelessBlockValidator( inboxReader, inboxTracker, diff --git a/cmd/replay/main.go b/cmd/replay/main.go index d3a581b086..0b9214755d 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -211,12 +211,12 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = daprovider.KeysetDontValidate } - var daProviders []daprovider.Reader + var dapReaders []daprovider.Reader if dasReader != nil { - daProviders = append(daProviders, daprovider.NewReaderForDAS(dasReader)) + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader)) } - daProviders = append(daProviders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, daProviders, keysetValidationMode) + dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { From 386ce2eca21243bc08a5e2bff934c5375eede21d Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 18 Apr 2024 22:19:31 -0300 Subject: [PATCH 09/40] sequencer adds the block hash to the feed --- arbnode/inbox_tracker.go | 2 +- arbnode/node.go | 4 +-- arbnode/transaction_streamer.go | 19 +++++++--- broadcaster/broadcaster.go | 18 +++++++--- broadcaster/message/message.go | 1 + .../message/message_serialization_test.go | 35 ++++++++++++++++++- execution/gethexec/executionengine.go | 8 ++--- execution/interface.go | 2 +- 8 files changed, 71 insertions(+), 18 deletions(-) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index b758e95e62..d1bc8f9ed1 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -302,7 +302,7 @@ func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcas if err != nil { return fmt.Errorf("error getting message %v: %w", seqNum, err) } - feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum) + feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum, nil) if err != nil { return fmt.Errorf("error creating broadcast feed message %v: %w", seqNum, err) } diff --git a/arbnode/node.go b/arbnode/node.go index 7a7a99ba88..22cc3ad7f5 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -997,8 +997,8 @@ func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, return n.InboxReader.GetFinalizedMsgCount(ctx) } -func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { - return 
n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) +func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, blockHash common.Hash) error { + return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta, blockHash) } func (n *Node) ExpectChosenSequencer() error { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 017c23c496..d0565d7a4c 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -936,7 +936,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return endBatch(batch) } - err := s.writeMessages(messageStartPos, messages, batch) + err := s.writeMessages(messageStartPos, messages, nil, batch) if err != nil { return err } @@ -966,7 +966,11 @@ func (s *TransactionStreamer) ExpectChosenSequencer() error { return nil } -func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { +func (s *TransactionStreamer) WriteMessageFromSequencer( + pos arbutil.MessageIndex, + msgWithMeta arbostypes.MessageWithMetadata, + blockHash common.Hash, +) error { if err := s.ExpectChosenSequencer(); err != nil { return err } @@ -990,7 +994,7 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex } } - if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { + if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, &blockHash, nil); err != nil { return err } @@ -1024,7 +1028,12 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty // The mutex must be held, and pos must be the latest message count. // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. 
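// A nil blockHash here is expected: only the sequencer path (WriteMessageFromSequencer
// above) knows the hash of the block it just produced, while batch replays from
// addMessagesAndEndBatchImpl pass nil and the feed simply omits the hash.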
-func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { +func (s *TransactionStreamer) writeMessages( + pos arbutil.MessageIndex, + messages []arbostypes.MessageWithMetadata, + blockHash *common.Hash, + batch ethdb.Batch, +) error { if batch == nil { batch = s.db.NewBatch() } @@ -1050,7 +1059,7 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ } if s.broadcastServer != nil { - if err := s.broadcastServer.BroadcastMessages(messages, pos); err != nil { + if err := s.broadcastServer.BroadcastMessages(messages, pos, blockHash); err != nil { log.Error("failed broadcasting message", "pos", pos, "err", err) } } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 242b8f9eeb..6a08007ed1 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -11,6 +11,7 @@ import ( "github.com/gobwas/ws" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -38,7 +39,11 @@ func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId u } } -func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMetadata, sequenceNumber arbutil.MessageIndex) (*m.BroadcastFeedMessage, error) { +func (b *Broadcaster) NewBroadcastFeedMessage( + message arbostypes.MessageWithMetadata, + sequenceNumber arbutil.MessageIndex, + blockHash *common.Hash, +) (*m.BroadcastFeedMessage, error) { var messageSignature []byte if b.dataSigner != nil { hash, err := message.Hash(sequenceNumber, b.chainId) @@ -54,6 +59,7 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta return &m.BroadcastFeedMessage{ SequenceNumber: sequenceNumber, Message: message, + BlockHash: blockHash, Signature: messageSignature, }, nil } @@ -65,7 +71,7 @@ func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq ar err = errors.New("panic in BroadcastSingle") } }() - bfm, err := b.NewBroadcastFeedMessage(msg, seq) + bfm, err := b.NewBroadcastFeedMessage(msg, seq, nil) if err != nil { return err } @@ -82,7 +88,11 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { b.BroadcastFeedMessages(broadcastFeedMessages) } -func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { +func (b *Broadcaster) BroadcastMessages( + messages []arbostypes.MessageWithMetadata, + seq arbutil.MessageIndex, + blockHash *common.Hash, +) (err error) { defer func() { if r := recover(); r != nil { log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) @@ -91,7 +101,7 @@ func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadat }() var feedMessages []*m.BroadcastFeedMessage for i, msg := range messages { - bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i)) + bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i), blockHash) if err != nil { return err } diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index a575ae5cd0..aca9598754 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -34,6 +34,7 @@ type BroadcastMessage struct { type BroadcastFeedMessage struct { SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` Message arbostypes.MessageWithMetadata `json:"message"` + BlockHash *common.Hash `json:"blockHash,omitempty"` 
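+	// (BlockHash above is a pointer marked `omitempty`, so feed messages from
+	// nodes that predate this field still decode; the hash is simply absent.)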
Signature []byte `json:"signature"` CumulativeSumMsgSize uint64 `json:"-"` diff --git a/broadcaster/message/message_serialization_test.go b/broadcaster/message/message_serialization_test.go index c3e14a86ae..1d8c10e388 100644 --- a/broadcaster/message/message_serialization_test.go +++ b/broadcaster/message/message_serialization_test.go @@ -13,7 +13,40 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" ) -func ExampleBroadcastMessage_broadcastfeedmessage() { +func ExampleBroadcastMessage_broadcastfeedmessageWithBlockHash() { + var requestId common.Hash + msg := BroadcastMessage{ + Version: 1, + Messages: []*BroadcastFeedMessage{ + { + SequenceNumber: 12345, + Message: arbostypes.MessageWithMetadata{ + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: 0, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &requestId, + L1BaseFee: big.NewInt(0), + }, + L2msg: []byte{0xde, 0xad, 0xbe, 0xef}, + }, + DelayedMessagesRead: 3333, + }, + BlockHash: &common.Hash{0: 0xff}, + Signature: nil, + }, + }, + } + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + _ = encoder.Encode(msg) + fmt.Println(buf.String()) + // Output: {"version":1,"messages":[{"sequenceNumber":12345,"message":{"message":{"header":{"kind":0,"sender":"0x0000000000000000000000000000000000000000","blockNumber":0,"timestamp":0,"requestId":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeeL1":0},"l2Msg":"3q2+7w=="},"delayedMessagesRead":3333},"blockHash":"0xff00000000000000000000000000000000000000000000000000000000000000","signature":null}]} +} + +func ExampleBroadcastMessage_broadcastfeedmessageWithoutBlockHash() { var requestId common.Hash msg := BroadcastMessage{ Version: 1, diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 16267720bb..7645c06b57 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -368,7 +368,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
return nil, err } - err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, block.Hash()) if err != nil { return nil, err } @@ -414,13 +414,13 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp DelayedMessagesRead: delayedSeqNum + 1, } - err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) + startTime := time.Now() + block, statedb, receipts, err := s.createBlockFromNextMessage(&messageWithMeta, false) if err != nil { return nil, err } - startTime := time.Now() - block, statedb, receipts, err := s.createBlockFromNextMessage(&messageWithMeta, false) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, block.Hash()) if err != nil { return nil, err } diff --git a/execution/interface.go b/execution/interface.go index 7540a09210..54d38ee13a 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -90,7 +90,7 @@ type ConsensusInfo interface { } type ConsensusSequencer interface { - WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error + WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, blockHash common.Hash) error ExpectChosenSequencer() error CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) BacklogL1GasCharged() uint64 From 0d5ec45c98d63458aa68240ca4c7f956c92e50c4 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Fri, 19 Apr 2024 09:52:03 -0300 Subject: [PATCH 10/40] broadcaster.BroadcastMessages allows different block hashes for different MessageWithMetadata --- arbnode/transaction_streamer.go | 31 +++++++++++++++++++++++-------- broadcaster/broadcaster.go | 12 ++++++++---- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index d0565d7a4c..fc9e138ef6 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -936,7 +936,17 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return endBatch(batch) } - err := s.writeMessages(messageStartPos, messages, nil, batch) + msgsWithBlocksHashes := make([]broadcaster.MessageWithMetadataAndBlockHash, 0, len(messages)) + for _, msg := range messages { + msgsWithBlocksHashes = append( + msgsWithBlocksHashes, + broadcaster.MessageWithMetadataAndBlockHash{ + Message: msg, + BlockHash: nil, + }, + ) + } + err := s.writeMessages(messageStartPos, msgsWithBlocksHashes, batch) if err != nil { return err } @@ -994,7 +1004,13 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( } } - if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, &blockHash, nil); err != nil { + msgWithBlockHash := []broadcaster.MessageWithMetadataAndBlockHash{ + { + Message: msgWithMeta, + BlockHash: &blockHash, + }, + } + if err := s.writeMessages(pos, msgWithBlockHash, nil); err != nil { return err } @@ -1030,21 +1046,20 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. 
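// Carrying the hash per message (rather than one hash for the whole slice) lets a
// single write mix sequencer-produced messages, whose block hash is known, with
// replayed messages for which it is not.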
func (s *TransactionStreamer) writeMessages( pos arbutil.MessageIndex, - messages []arbostypes.MessageWithMetadata, - blockHash *common.Hash, + messagesWithBlockHash []broadcaster.MessageWithMetadataAndBlockHash, batch ethdb.Batch, ) error { if batch == nil { batch = s.db.NewBatch() } - for i, msg := range messages { - err := s.writeMessage(pos+arbutil.MessageIndex(i), msg, batch) + for i, msg := range messagesWithBlockHash { + err := s.writeMessage(pos+arbutil.MessageIndex(i), msg.Message, batch) if err != nil { return err } } - err := setMessageCount(batch, pos+arbutil.MessageIndex(len(messages))) + err := setMessageCount(batch, pos+arbutil.MessageIndex(len(messagesWithBlockHash))) if err != nil { return err } @@ -1059,7 +1074,7 @@ func (s *TransactionStreamer) writeMessages( } if s.broadcastServer != nil { - if err := s.broadcastServer.BroadcastMessages(messages, pos, blockHash); err != nil { + if err := s.broadcastServer.BroadcastMessages(messagesWithBlockHash, pos); err != nil { log.Error("failed broadcasting message", "pos", pos, "err", err) } } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 6a08007ed1..ec564e6521 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -22,6 +22,11 @@ import ( "github.com/offchainlabs/nitro/wsbroadcastserver" ) +type MessageWithMetadataAndBlockHash struct { + Message arbostypes.MessageWithMetadata + BlockHash *common.Hash +} + type Broadcaster struct { server *wsbroadcastserver.WSBroadcastServer backlog backlog.Backlog @@ -89,9 +94,8 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { } func (b *Broadcaster) BroadcastMessages( - messages []arbostypes.MessageWithMetadata, + messagesWithBlockHash []MessageWithMetadataAndBlockHash, seq arbutil.MessageIndex, - blockHash *common.Hash, ) (err error) { defer func() { if r := recover(); r != nil { @@ -100,8 +104,8 @@ func (b *Broadcaster) BroadcastMessages( } }() var feedMessages []*m.BroadcastFeedMessage - for i, msg := range messages { - bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i), blockHash) + for i, msg := range messagesWithBlockHash { + bfm, err := b.NewBroadcastFeedMessage(msg.Message, seq+arbutil.MessageIndex(i), msg.BlockHash) if err != nil { return err } From ab980846b1c55ff51968c0e9112dcb264b430f36 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 24 Apr 2024 20:31:58 -0300 Subject: [PATCH 11/40] moves l2BlockHash to MessageWithMetadata --- arbnode/inbox_tracker.go | 2 +- arbnode/node.go | 4 +- arbnode/transaction_streamer.go | 57 +++++++------------ arbos/arbostypes/messagewithmeta.go | 1 + broadcaster/broadcaster.go | 24 ++------ broadcaster/message/message.go | 1 - .../message/message_serialization_test.go | 4 +- execution/gethexec/executionengine.go | 8 ++- execution/interface.go | 2 +- 9 files changed, 38 insertions(+), 65 deletions(-) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index d1bc8f9ed1..b758e95e62 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -302,7 +302,7 @@ func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcas if err != nil { return fmt.Errorf("error getting message %v: %w", seqNum, err) } - feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum, nil) + feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum) if err != nil { return fmt.Errorf("error creating broadcast feed message %v: %w", seqNum, err) } diff --git a/arbnode/node.go b/arbnode/node.go index 
22cc3ad7f5..7a7a99ba88 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -997,8 +997,8 @@ func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, return n.InboxReader.GetFinalizedMsgCount(ctx) } -func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, blockHash common.Hash) error { - return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta, blockHash) +func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { + return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) } func (n *Node) ExpectChosenSequencer() error { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index fc9e138ef6..06e67feaa6 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -773,19 +773,25 @@ func (s *TransactionStreamer) countDuplicateMessages( } var duplicateMessage bool if nextMessage.Message != nil { - if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil { - // Remove both of the batch gas costs and see if the messages still differ + if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil || dbMessageParsed.L2BlockHash == nil || nextMessage.L2BlockHash == nil { + // Remove both of the batch gas costs and l2 block hashes and see if the messages still differ nextMessageCopy := nextMessage nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) *nextMessageCopy.Message = *nextMessage.Message + batchGasCostBkup := dbMessageParsed.Message.BatchGasCost + l2BlockHashBkup := dbMessageParsed.L2BlockHash + dbMessageParsed.Message.BatchGasCost = nil + dbMessageParsed.L2BlockHash = nil nextMessageCopy.Message.BatchGasCost = nil + nextMessageCopy.L2BlockHash = nil + if reflect.DeepEqual(dbMessageParsed, nextMessageCopy) { - // Actually this isn't a reorg; only the batch gas costs differed + // Actually this isn't a reorg; only the batch gas costs or l2 block hashes differed duplicateMessage = true - // If possible - update the message in the database to add the gas cost cache. - if batch != nil && nextMessage.Message.BatchGasCost != nil { + // If possible - update the message in the database to add the gas cost and l2 block hashes. 
+ if batch != nil && (nextMessage.Message.BatchGasCost != nil || nextMessage.L2BlockHash != nil) { if *batch == nil { *batch = s.db.NewBatch() } @@ -795,6 +801,7 @@ func (s *TransactionStreamer) countDuplicateMessages( } } dbMessageParsed.Message.BatchGasCost = batchGasCostBkup + dbMessageParsed.L2BlockHash = l2BlockHashBkup } } @@ -936,17 +943,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return endBatch(batch) } - msgsWithBlocksHashes := make([]broadcaster.MessageWithMetadataAndBlockHash, 0, len(messages)) - for _, msg := range messages { - msgsWithBlocksHashes = append( - msgsWithBlocksHashes, - broadcaster.MessageWithMetadataAndBlockHash{ - Message: msg, - BlockHash: nil, - }, - ) - } - err := s.writeMessages(messageStartPos, msgsWithBlocksHashes, batch) + err := s.writeMessages(messageStartPos, messages, batch) if err != nil { return err } @@ -976,11 +973,7 @@ func (s *TransactionStreamer) ExpectChosenSequencer() error { return nil } -func (s *TransactionStreamer) WriteMessageFromSequencer( - pos arbutil.MessageIndex, - msgWithMeta arbostypes.MessageWithMetadata, - blockHash common.Hash, -) error { +func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { if err := s.ExpectChosenSequencer(); err != nil { return err } @@ -1004,13 +997,7 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( } } - msgWithBlockHash := []broadcaster.MessageWithMetadataAndBlockHash{ - { - Message: msgWithMeta, - BlockHash: &blockHash, - }, - } - if err := s.writeMessages(pos, msgWithBlockHash, nil); err != nil { + if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { return err } @@ -1044,22 +1031,18 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty // The mutex must be held, and pos must be the latest message count. // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. 
-func (s *TransactionStreamer) writeMessages( - pos arbutil.MessageIndex, - messagesWithBlockHash []broadcaster.MessageWithMetadataAndBlockHash, - batch ethdb.Batch, -) error { +func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { if batch == nil { batch = s.db.NewBatch() } - for i, msg := range messagesWithBlockHash { - err := s.writeMessage(pos+arbutil.MessageIndex(i), msg.Message, batch) + for i, msg := range messages { + err := s.writeMessage(pos+arbutil.MessageIndex(i), msg, batch) if err != nil { return err } } - err := setMessageCount(batch, pos+arbutil.MessageIndex(len(messagesWithBlockHash))) + err := setMessageCount(batch, pos+arbutil.MessageIndex(len(messages))) if err != nil { return err } @@ -1074,7 +1057,7 @@ func (s *TransactionStreamer) writeMessages( } if s.broadcastServer != nil { - if err := s.broadcastServer.BroadcastMessages(messagesWithBlockHash, pos); err != nil { + if err := s.broadcastServer.BroadcastMessages(messages, pos); err != nil { log.Error("failed broadcasting message", "pos", pos, "err", err) } } diff --git a/arbos/arbostypes/messagewithmeta.go b/arbos/arbostypes/messagewithmeta.go index a3d4f5e3c3..99c6f80509 100644 --- a/arbos/arbostypes/messagewithmeta.go +++ b/arbos/arbostypes/messagewithmeta.go @@ -16,6 +16,7 @@ var uniquifyingPrefix = []byte("Arbitrum Nitro Feed:") type MessageWithMetadata struct { Message *L1IncomingMessage `json:"message"` DelayedMessagesRead uint64 `json:"delayedMessagesRead"` + L2BlockHash *common.Hash `json:"l2BlockHash,omitempty" rlp:"nilList,optional"` } var EmptyTestMessageWithMetadata = MessageWithMetadata{ diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ec564e6521..242b8f9eeb 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -11,7 +11,6 @@ import ( "github.com/gobwas/ws" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -22,11 +21,6 @@ import ( "github.com/offchainlabs/nitro/wsbroadcastserver" ) -type MessageWithMetadataAndBlockHash struct { - Message arbostypes.MessageWithMetadata - BlockHash *common.Hash -} - type Broadcaster struct { server *wsbroadcastserver.WSBroadcastServer backlog backlog.Backlog @@ -44,11 +38,7 @@ func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId u } } -func (b *Broadcaster) NewBroadcastFeedMessage( - message arbostypes.MessageWithMetadata, - sequenceNumber arbutil.MessageIndex, - blockHash *common.Hash, -) (*m.BroadcastFeedMessage, error) { +func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMetadata, sequenceNumber arbutil.MessageIndex) (*m.BroadcastFeedMessage, error) { var messageSignature []byte if b.dataSigner != nil { hash, err := message.Hash(sequenceNumber, b.chainId) @@ -64,7 +54,6 @@ func (b *Broadcaster) NewBroadcastFeedMessage( return &m.BroadcastFeedMessage{ SequenceNumber: sequenceNumber, Message: message, - BlockHash: blockHash, Signature: messageSignature, }, nil } @@ -76,7 +65,7 @@ func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq ar err = errors.New("panic in BroadcastSingle") } }() - bfm, err := b.NewBroadcastFeedMessage(msg, seq, nil) + bfm, err := b.NewBroadcastFeedMessage(msg, seq) if err != nil { return err } @@ -93,10 +82,7 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { b.BroadcastFeedMessages(broadcastFeedMessages) } -func (b *Broadcaster) 
BroadcastMessages( - messagesWithBlockHash []MessageWithMetadataAndBlockHash, - seq arbutil.MessageIndex, -) (err error) { +func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { defer func() { if r := recover(); r != nil { log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) @@ -104,8 +90,8 @@ func (b *Broadcaster) BroadcastMessages( } }() var feedMessages []*m.BroadcastFeedMessage - for i, msg := range messagesWithBlockHash { - bfm, err := b.NewBroadcastFeedMessage(msg.Message, seq+arbutil.MessageIndex(i), msg.BlockHash) + for i, msg := range messages { + bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i)) if err != nil { return err } diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index aca9598754..a575ae5cd0 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -34,7 +34,6 @@ type BroadcastMessage struct { type BroadcastFeedMessage struct { SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` Message arbostypes.MessageWithMetadata `json:"message"` - BlockHash *common.Hash `json:"blockHash,omitempty"` Signature []byte `json:"signature"` CumulativeSumMsgSize uint64 `json:"-"` diff --git a/broadcaster/message/message_serialization_test.go b/broadcaster/message/message_serialization_test.go index 1d8c10e388..ce7baf03f0 100644 --- a/broadcaster/message/message_serialization_test.go +++ b/broadcaster/message/message_serialization_test.go @@ -33,8 +33,8 @@ func ExampleBroadcastMessage_broadcastfeedmessageWithBlockHash() { L2msg: []byte{0xde, 0xad, 0xbe, 0xef}, }, DelayedMessagesRead: 3333, + L2BlockHash: &common.Hash{0: 0xff}, }, - BlockHash: &common.Hash{0: 0xff}, Signature: nil, }, }, @@ -43,7 +43,7 @@ func ExampleBroadcastMessage_broadcastfeedmessageWithBlockHash() { encoder := json.NewEncoder(&buf) _ = encoder.Encode(msg) fmt.Println(buf.String()) - // Output: {"version":1,"messages":[{"sequenceNumber":12345,"message":{"message":{"header":{"kind":0,"sender":"0x0000000000000000000000000000000000000000","blockNumber":0,"timestamp":0,"requestId":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeeL1":0},"l2Msg":"3q2+7w=="},"delayedMessagesRead":3333},"blockHash":"0xff00000000000000000000000000000000000000000000000000000000000000","signature":null}]} + // Output: {"version":1,"messages":[{"sequenceNumber":12345,"message":{"message":{"header":{"kind":0,"sender":"0x0000000000000000000000000000000000000000","blockNumber":0,"timestamp":0,"requestId":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeeL1":0},"l2Msg":"3q2+7w=="},"delayedMessagesRead":3333,"l2BlockHash":"0xff00000000000000000000000000000000000000000000000000000000000000"},"signature":null}]} } func ExampleBroadcastMessage_broadcastfeedmessageWithoutBlockHash() { diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 7645c06b57..3946da2aed 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -358,9 +358,11 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
return nil, err } + l2BlockHash := block.Hash() msgWithMeta := arbostypes.MessageWithMetadata{ Message: msg, DelayedMessagesRead: delayedMessagesRead, + L2BlockHash: &l2BlockHash, } pos, err := s.BlockNumberToMessageIndex(lastBlockHeader.Number.Uint64() + 1) @@ -368,7 +370,7 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. return nil, err } - err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, block.Hash()) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta) if err != nil { return nil, err } @@ -419,8 +421,10 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp if err != nil { return nil, err } + blockHash := block.Hash() + messageWithMeta.L2BlockHash = &blockHash - err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, block.Hash()) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) if err != nil { return nil, err } diff --git a/execution/interface.go b/execution/interface.go index 54d38ee13a..7540a09210 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -90,7 +90,7 @@ type ConsensusInfo interface { } type ConsensusSequencer interface { - WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, blockHash common.Hash) error + WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error ExpectChosenSequencer() error CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) BacklogL1GasCharged() uint64 From 8db287c7bbfded43516151a7ec14f49bb1be27fa Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 24 Apr 2024 20:49:02 -0300 Subject: [PATCH 12/40] fix block computation time --- execution/gethexec/executionengine.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 3946da2aed..82c4726a1a 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -421,6 +421,8 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp if err != nil { return nil, err } + blockCalcTime := time.Since(startTime) + blockHash := block.Hash() messageWithMeta.L2BlockHash = &blockHash @@ -429,7 +431,7 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp return nil, err } - err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) + err = s.appendBlock(block, statedb, receipts, blockCalcTime) if err != nil { return nil, err } From eaf6a8c507a632624daf9dfe0a529cf8d877a231 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 25 Apr 2024 08:31:57 -0300 Subject: [PATCH 13/40] broadcast messages after producing a block and not after writing them in the db --- arbnode/node.go | 4 ++++ arbnode/transaction_streamer.go | 16 ++++++++++------ broadcaster/broadcaster.go | 21 --------------------- execution/gethexec/executionengine.go | 8 ++++++++ execution/interface.go | 1 + 5 files changed, 23 insertions(+), 27 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 7a7a99ba88..7e0532e5b8 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -997,6 +997,10 @@ func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, return n.InboxReader.GetFinalizedMsgCount(ctx) } +func (n *Node) BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex) { + n.TxStreamer.BroadcastMessage(msg, pos) +} + func (n *Node) 
WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 06e67feaa6..b25cd136be 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -1000,6 +1000,7 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { return err } + s.BroadcastMessage(msgWithMeta, pos) return nil } @@ -1029,6 +1030,15 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty return batch.Put(key, msgBytes) } +func (s *TransactionStreamer) BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex) { + if s.broadcastServer == nil { + return + } + if err := s.broadcastServer.BroadcastSingle(msg, pos); err != nil { + log.Error("failed broadcasting message", "pos", pos, "err", err) + } +} + // The mutex must be held, and pos must be the latest message count. // `batch` may be nil, which initializes a new batch. The batch is closed out in this function. func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages []arbostypes.MessageWithMetadata, batch ethdb.Batch) error { @@ -1056,12 +1066,6 @@ func (s *TransactionStreamer) writeMessages(pos arbutil.MessageIndex, messages [ default: } - if s.broadcastServer != nil { - if err := s.broadcastServer.BroadcastMessages(messages, pos); err != nil { - log.Error("failed broadcasting message", "pos", pos, "err", err) - } - } - return nil } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 242b8f9eeb..62e7f4a7bc 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -82,27 +82,6 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { b.BroadcastFeedMessages(broadcastFeedMessages) } -func (b *Broadcaster) BroadcastMessages(messages []arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { - defer func() { - if r := recover(); r != nil { - log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) - err = errors.New("panic in BroadcastMessages") - } - }() - var feedMessages []*m.BroadcastFeedMessage - for i, msg := range messages { - bfm, err := b.NewBroadcastFeedMessage(msg, seq+arbutil.MessageIndex(i)) - if err != nil { - return err - } - feedMessages = append(feedMessages, bfm) - } - - b.BroadcastFeedMessages(feedMessages) - - return nil -} - func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) { bm := &m.BroadcastMessage{ diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 82c4726a1a..6b5f0b9be1 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -630,6 +630,14 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, if err != nil { return err } + + if s.consensus != nil { + l2BlockHash := block.Hash() + msg.L2BlockHash = &l2BlockHash + + s.consensus.BroadcastMessage(*msg, num) + } + err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) if err != nil { return err diff --git a/execution/interface.go b/execution/interface.go index 7540a09210..299d0059bd 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -90,6 +90,7 @@ type ConsensusInfo interface { } type 
ConsensusSequencer interface { + BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error ExpectChosenSequencer() error CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) From a679e4206887a77a5c9df0695bb0904939616a37 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 25 Apr 2024 16:43:13 -0300 Subject: [PATCH 14/40] moves blockHash to BroadcastFeedMessage again --- arbnode/inbox_tracker.go | 2 +- arbnode/node.go | 8 ++--- arbnode/transaction_streamer.go | 30 +++++++++++-------- arbos/arbostypes/messagewithmeta.go | 1 - broadcastclient/broadcastclient_test.go | 12 ++++---- broadcaster/broadcaster.go | 16 ++++++++-- broadcaster/broadcaster_test.go | 14 ++++----- broadcaster/message/message.go | 1 + .../message/message_serialization_test.go | 4 +-- execution/gethexec/executionengine.go | 30 ++++++++++--------- execution/interface.go | 4 +-- 11 files changed, 69 insertions(+), 53 deletions(-) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index b758e95e62..d1bc8f9ed1 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -302,7 +302,7 @@ func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcas if err != nil { return fmt.Errorf("error getting message %v: %w", seqNum, err) } - feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum) + feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum, nil) if err != nil { return fmt.Errorf("error creating broadcast feed message %v: %w", seqNum, err) } diff --git a/arbnode/node.go b/arbnode/node.go index 7e0532e5b8..33aed96584 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -997,12 +997,12 @@ func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, return n.InboxReader.GetFinalizedMsgCount(ctx) } -func (n *Node) BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex) { - n.TxStreamer.BroadcastMessage(msg, pos) +func (n *Node) BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex, msgResult execution.MessageResult) { + n.TxStreamer.BroadcastMessage(msg, pos, msgResult) } -func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { - return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta) +func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult execution.MessageResult) error { + return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta, msgResult) } func (n *Node) ExpectChosenSequencer() error { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index b25cd136be..46c0fcdb7e 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -773,25 +773,22 @@ func (s *TransactionStreamer) countDuplicateMessages( } var duplicateMessage bool if nextMessage.Message != nil { - if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil || dbMessageParsed.L2BlockHash == nil || nextMessage.L2BlockHash == nil { - // Remove both of the batch gas costs and l2 block hashes and see if the messages still differ + if dbMessageParsed.Message.BatchGasCost == nil || nextMessage.Message.BatchGasCost == nil { + // Remove both of the batch gas costs and see if the messages still differ nextMessageCopy := nextMessage 
nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) *nextMessageCopy.Message = *nextMessage.Message batchGasCostBkup := dbMessageParsed.Message.BatchGasCost - l2BlockHashBkup := dbMessageParsed.L2BlockHash dbMessageParsed.Message.BatchGasCost = nil - dbMessageParsed.L2BlockHash = nil nextMessageCopy.Message.BatchGasCost = nil - nextMessageCopy.L2BlockHash = nil if reflect.DeepEqual(dbMessageParsed, nextMessageCopy) { - // Actually this isn't a reorg; only the batch gas costs or l2 block hashes differed + // Actually this isn't a reorg; only the batch gas costs differed duplicateMessage = true - // If possible - update the message in the database to add the gas cost and l2 block hashes. - if batch != nil && (nextMessage.Message.BatchGasCost != nil || nextMessage.L2BlockHash != nil) { + // If possible - update the message in the database to add the gas cost cache. + if batch != nil && nextMessage.Message.BatchGasCost != nil { if *batch == nil { *batch = s.db.NewBatch() } @@ -801,7 +798,6 @@ func (s *TransactionStreamer) countDuplicateMessages( } } dbMessageParsed.Message.BatchGasCost = batchGasCostBkup - dbMessageParsed.L2BlockHash = l2BlockHashBkup } } @@ -973,7 +969,11 @@ func (s *TransactionStreamer) ExpectChosenSequencer() error { return nil } -func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error { +func (s *TransactionStreamer) WriteMessageFromSequencer( + pos arbutil.MessageIndex, + msgWithMeta arbostypes.MessageWithMetadata, + msgResult execution.MessageResult, +) error { if err := s.ExpectChosenSequencer(); err != nil { return err } @@ -1000,7 +1000,7 @@ func (s *TransactionStreamer) WriteMessageFromSequencer(pos arbutil.MessageIndex if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { return err } - s.BroadcastMessage(msgWithMeta, pos) + s.BroadcastMessage(msgWithMeta, pos, msgResult) return nil } @@ -1030,11 +1030,15 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty return batch.Put(key, msgBytes) } -func (s *TransactionStreamer) BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex) { +func (s *TransactionStreamer) BroadcastMessage( + msg arbostypes.MessageWithMetadata, + pos arbutil.MessageIndex, + msgResult execution.MessageResult, +) { if s.broadcastServer == nil { return } - if err := s.broadcastServer.BroadcastSingle(msg, pos); err != nil { + if err := s.broadcastServer.BroadcastSingle(msg, pos, &msgResult.BlockHash); err != nil { log.Error("failed broadcasting message", "pos", pos, "err", err) } } diff --git a/arbos/arbostypes/messagewithmeta.go b/arbos/arbostypes/messagewithmeta.go index 99c6f80509..a3d4f5e3c3 100644 --- a/arbos/arbostypes/messagewithmeta.go +++ b/arbos/arbostypes/messagewithmeta.go @@ -16,7 +16,6 @@ var uniquifyingPrefix = []byte("Arbitrum Nitro Feed:") type MessageWithMetadata struct { Message *L1IncomingMessage `json:"message"` DelayedMessagesRead uint64 `json:"delayedMessagesRead"` - L2BlockHash *common.Hash `json:"l2BlockHash,omitempty" rlp:"nilList,optional"` } var EmptyTestMessageWithMetadata = MessageWithMetadata{ diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index 84356d77e0..44b48192ab 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -105,7 +105,7 @@ func testReceiveMessages(t *testing.T, clientCompression bool, serverCompression go func() { for i := 0; i < 
messageCount; i++ { - Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i))) + Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil)) } }() @@ -156,7 +156,7 @@ func TestInvalidSignature(t *testing.T) { go func() { for i := 0; i < messageCount; i++ { - Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i))) + Require(t, b.BroadcastSingle(arbostypes.TestMessageWithMetadataAndRequestId, arbutil.MessageIndex(i), nil)) } }() @@ -316,7 +316,7 @@ func TestServerClientDisconnect(t *testing.T) { broadcastClient.Start(ctx) t.Log("broadcasting seq 0 message") - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0, nil)) // Wait for client to receive batch to ensure it is connected timer := time.NewTimer(5 * time.Second) @@ -387,7 +387,7 @@ func TestBroadcastClientConfirmedMessage(t *testing.T) { broadcastClient.Start(ctx) t.Log("broadcasting seq 0 message") - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0, nil)) // Wait for client to receive batch to ensure it is connected timer := time.NewTimer(5 * time.Second) @@ -724,8 +724,8 @@ func TestBroadcasterSendsCachedMessagesOnClientConnect(t *testing.T) { Require(t, b.Start(ctx)) defer b.StopAndWait() - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0)) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 0, nil)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1, nil)) var wg sync.WaitGroup for i := 0; i < 2; i++ { diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 62e7f4a7bc..ca412cce10 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -11,6 +11,7 @@ import ( "github.com/gobwas/ws" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/arbostypes" @@ -38,7 +39,11 @@ func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId u } } -func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMetadata, sequenceNumber arbutil.MessageIndex) (*m.BroadcastFeedMessage, error) { +func (b *Broadcaster) NewBroadcastFeedMessage( + message arbostypes.MessageWithMetadata, + sequenceNumber arbutil.MessageIndex, + blockHash *common.Hash, +) (*m.BroadcastFeedMessage, error) { var messageSignature []byte if b.dataSigner != nil { hash, err := message.Hash(sequenceNumber, b.chainId) @@ -54,18 +59,23 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta return &m.BroadcastFeedMessage{ SequenceNumber: sequenceNumber, Message: message, + BlockHash: blockHash, Signature: messageSignature, }, nil } -func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq arbutil.MessageIndex) (err error) { +func (b *Broadcaster) BroadcastSingle( + msg arbostypes.MessageWithMetadata, + seq arbutil.MessageIndex, + blockHash *common.Hash, +) (err error) { defer func() { if r := recover(); r != nil { log.Error("recovered error in BroadcastSingle", "recover", r, "backtrace", string(debug.Stack())) err = errors.New("panic in BroadcastSingle") } }() - bfm, err := b.NewBroadcastFeedMessage(msg, seq) + bfm, err := 
b.NewBroadcastFeedMessage(msg, seq, blockHash) if err != nil { return err } diff --git a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go index 8ac06e9705..dc208f4163 100644 --- a/broadcaster/broadcaster_test.go +++ b/broadcaster/broadcaster_test.go @@ -70,17 +70,17 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { } // Normal broadcasting and confirming - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 1, nil)) waitUntilUpdated(t, expectMessageCount(1, "after 1 message")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 2)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 2, nil)) waitUntilUpdated(t, expectMessageCount(2, "after 2 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 3)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 3, nil)) waitUntilUpdated(t, expectMessageCount(3, "after 3 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 4)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 4, nil)) waitUntilUpdated(t, expectMessageCount(4, "after 4 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 5)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 5, nil)) waitUntilUpdated(t, expectMessageCount(5, "after 4 messages")) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 6)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 6, nil)) waitUntilUpdated(t, expectMessageCount(6, "after 4 messages")) b.Confirm(4) @@ -96,7 +96,7 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { "nothing changed because confirmed sequence number before cache")) b.Confirm(5) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 7)) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 7, nil)) waitUntilUpdated(t, expectMessageCount(2, "after 7 messages, 5 cleared by confirm")) diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index a575ae5cd0..aca9598754 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -34,6 +34,7 @@ type BroadcastMessage struct { type BroadcastFeedMessage struct { SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` Message arbostypes.MessageWithMetadata `json:"message"` + BlockHash *common.Hash `json:"blockHash,omitempty"` Signature []byte `json:"signature"` CumulativeSumMsgSize uint64 `json:"-"` diff --git a/broadcaster/message/message_serialization_test.go b/broadcaster/message/message_serialization_test.go index ce7baf03f0..1d8c10e388 100644 --- a/broadcaster/message/message_serialization_test.go +++ b/broadcaster/message/message_serialization_test.go @@ -33,8 +33,8 @@ func ExampleBroadcastMessage_broadcastfeedmessageWithBlockHash() { L2msg: []byte{0xde, 0xad, 0xbe, 0xef}, }, DelayedMessagesRead: 3333, - L2BlockHash: &common.Hash{0: 0xff}, }, + BlockHash: &common.Hash{0: 0xff}, Signature: nil, }, }, @@ -43,7 +43,7 @@ func ExampleBroadcastMessage_broadcastfeedmessageWithBlockHash() { encoder := json.NewEncoder(&buf) _ = encoder.Encode(msg) fmt.Println(buf.String()) - // Output: 
{"version":1,"messages":[{"sequenceNumber":12345,"message":{"message":{"header":{"kind":0,"sender":"0x0000000000000000000000000000000000000000","blockNumber":0,"timestamp":0,"requestId":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeeL1":0},"l2Msg":"3q2+7w=="},"delayedMessagesRead":3333,"l2BlockHash":"0xff00000000000000000000000000000000000000000000000000000000000000"},"signature":null}]} + // Output: {"version":1,"messages":[{"sequenceNumber":12345,"message":{"message":{"header":{"kind":0,"sender":"0x0000000000000000000000000000000000000000","blockNumber":0,"timestamp":0,"requestId":"0x0000000000000000000000000000000000000000000000000000000000000000","baseFeeL1":0},"l2Msg":"3q2+7w=="},"delayedMessagesRead":3333},"blockHash":"0xff00000000000000000000000000000000000000000000000000000000000000","signature":null}]} } func ExampleBroadcastMessage_broadcastfeedmessageWithoutBlockHash() { diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 6b5f0b9be1..b94830ebbc 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -358,19 +358,20 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. return nil, err } - l2BlockHash := block.Hash() + pos, err := s.BlockNumberToMessageIndex(lastBlockHeader.Number.Uint64() + 1) + if err != nil { + return nil, err + } + msgWithMeta := arbostypes.MessageWithMetadata{ Message: msg, DelayedMessagesRead: delayedMessagesRead, - L2BlockHash: &l2BlockHash, } - - pos, err := s.BlockNumberToMessageIndex(lastBlockHeader.Number.Uint64() + 1) - if err != nil { - return nil, err + msgResult := execution.MessageResult{ + BlockHash: block.Hash(), } - err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, msgResult) if err != nil { return nil, err } @@ -423,10 +424,11 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp } blockCalcTime := time.Since(startTime) - blockHash := block.Hash() - messageWithMeta.L2BlockHash = &blockHash + msgResult := execution.MessageResult{ + BlockHash: block.Hash(), + } - err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, msgResult) if err != nil { return nil, err } @@ -632,10 +634,10 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, } if s.consensus != nil { - l2BlockHash := block.Hash() - msg.L2BlockHash = &l2BlockHash - - s.consensus.BroadcastMessage(*msg, num) + msgResult := execution.MessageResult{ + BlockHash: block.Hash(), + } + s.consensus.BroadcastMessage(*msg, num, msgResult) } err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) diff --git a/execution/interface.go b/execution/interface.go index 299d0059bd..ff6c4c7d90 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -90,8 +90,8 @@ type ConsensusInfo interface { } type ConsensusSequencer interface { - BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex) - WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata) error + BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex, msgResult MessageResult) + WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult MessageResult) error ExpectChosenSequencer() error CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, 
callDataUnits uint64, l1GasCharged uint64) BacklogL1GasCharged() uint64 From 47175db391e8eb60e4fff12765e7864be82741c0 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Thu, 25 Apr 2024 19:23:31 -0300 Subject: [PATCH 15/40] minor fix --- arbnode/transaction_streamer.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 46c0fcdb7e..e925b60670 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -778,12 +778,9 @@ func (s *TransactionStreamer) countDuplicateMessages( nextMessageCopy := nextMessage nextMessageCopy.Message = new(arbostypes.L1IncomingMessage) *nextMessageCopy.Message = *nextMessage.Message - batchGasCostBkup := dbMessageParsed.Message.BatchGasCost - dbMessageParsed.Message.BatchGasCost = nil nextMessageCopy.Message.BatchGasCost = nil - if reflect.DeepEqual(dbMessageParsed, nextMessageCopy) { // Actually this isn't a reorg; only the batch gas costs differed duplicateMessage = true From c6eb470255a9295b400496c5867cac383a099021 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 26 Apr 2024 10:30:15 -0700 Subject: [PATCH 16/40] update geth pin --- go-ethereum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go-ethereum b/go-ethereum index 22399a74e2..19f8227480 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 22399a74e2b413e99a4f0d06c65862ced0d021c7 +Subproject commit 19f82274804e2e21fbbb3379a02502910413b46c From 626c80576e0120a0d04b17da38851941104199d6 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Fri, 26 Apr 2024 17:59:14 -0700 Subject: [PATCH 17/40] address PR comments --- das/bigcache_storage_service.go | 116 -------------------------------- 1 file changed, 116 deletions(-) delete mode 100644 das/bigcache_storage_service.go diff --git a/das/bigcache_storage_service.go b/das/bigcache_storage_service.go deleted file mode 100644 index f3586c8276..0000000000 --- a/das/bigcache_storage_service.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE
-
-package das
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/allegro/bigcache"
-	"github.com/offchainlabs/nitro/arbstate/daprovider"
-	"github.com/offchainlabs/nitro/das/dastree"
-	"github.com/offchainlabs/nitro/util/pretty"
-	flag "github.com/spf13/pflag"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
-)
-
-type BigCacheConfig struct {
-	// TODO add other config information like HardMaxCacheSize
-	Enable             bool          `koanf:"enable"`
-	Expiration         time.Duration `koanf:"expiration"`
-	MaxEntriesInWindow int
-}
-
-var DefaultBigCacheConfig = BigCacheConfig{
-	Expiration: time.Hour,
-}
-
-var TestBigCacheConfig = BigCacheConfig{
-	Enable:             true,
-	Expiration:         time.Hour,
-	MaxEntriesInWindow: 1000,
-}
-
-func BigCacheConfigAddOptions(prefix string, f *flag.FlagSet) {
-	f.Bool(prefix+".enable", DefaultBigCacheConfig.Enable, "Enable local in-memory caching of sequencer batch data")
-	f.Duration(prefix+".expiration", DefaultBigCacheConfig.Expiration, "Expiration time for in-memory cached sequencer batches")
-}
-
-type BigCacheStorageService struct {
-	baseStorageService StorageService
-	bigCacheConfig     BigCacheConfig
-	bigCache           *bigcache.BigCache
-}
-
-func NewBigCacheStorageService(bigCacheConfig BigCacheConfig, baseStorageService StorageService) (StorageService, error) {
-	conf := bigcache.DefaultConfig(bigCacheConfig.Expiration)
-	if bigCacheConfig.MaxEntriesInWindow > 0 {
-		conf.MaxEntriesInWindow = bigCacheConfig.MaxEntriesInWindow
-	}
-	bigCache, err := bigcache.NewBigCache(conf)
-	if err != nil {
-		return nil, err
-	}
-	return &BigCacheStorageService{
-		baseStorageService: baseStorageService,
-		bigCacheConfig:     bigCacheConfig,
-		bigCache:           bigCache,
-	}, nil
-}
-
-func (bcs *BigCacheStorageService) GetByHash(ctx context.Context, key common.Hash) ([]byte, error) {
-	log.Trace("das.BigCacheStorageService.GetByHash", "key", pretty.PrettyHash(key), "this", bcs)
-
-	ret, err := bcs.bigCache.Get(string(key.Bytes()))
-	if err != nil {
-		ret, err = bcs.baseStorageService.GetByHash(ctx, key)
-		if err != nil {
-			return nil, err
-		}
-
-		err = bcs.bigCache.Set(string(key.Bytes()), ret)
-		if err != nil {
-			return nil, err
-		}
-		return ret, err
-	}
-
-	return ret, err
-}
-
-func (bcs *BigCacheStorageService) Put(ctx context.Context, value []byte, timeout uint64) error {
-	logPut("das.BigCacheStorageService.Put", value, timeout, bcs)
-	err := bcs.baseStorageService.Put(ctx, value, timeout)
-	if err != nil {
-		return err
-	}
-	return bcs.bigCache.Set(string(dastree.HashBytes(value)), value)
-}
-
-func (bcs *BigCacheStorageService) Sync(ctx context.Context) error {
-	return bcs.baseStorageService.Sync(ctx)
-}
-
-func (bcs *BigCacheStorageService) Close(ctx context.Context) error {
-	err := bcs.bigCache.Close()
-	if err != nil {
-		return err
-	}
-	return bcs.baseStorageService.Close(ctx)
-}
-
-func (bcs *BigCacheStorageService) ExpirationPolicy(ctx context.Context) (daprovider.ExpirationPolicy, error) {
-	return bcs.baseStorageService.ExpirationPolicy(ctx)
-}
-
-func (bcs *BigCacheStorageService) String() string {
-	return fmt.Sprintf("BigCacheStorageService(%+v)", bcs.bigCacheConfig)
-}
-
-func (bcs *BigCacheStorageService) HealthCheck(ctx context.Context) error {
-	return bcs.baseStorageService.HealthCheck(ctx)
-}

From 772b2edb823e882d09c7b1e9374a65ef49030975 Mon Sep 17 00:00:00 2001
From: Diego Ximenes
Date: Mon, 29 Apr 2024 17:59:11 -0300
Subject: [PATCH 18/40] ExecutionEngine does not call
TransactionStreamer to broadcast messages --- arbnode/node.go | 4 --- arbnode/transaction_streamer.go | 37 ++++++++++++++----- broadcaster/broadcaster.go | 29 +++++++++++++++ execution/gethexec/executionengine.go | 51 ++++++++++++++------------- execution/gethexec/node.go | 4 +-- execution/interface.go | 5 ++- 6 files changed, 88 insertions(+), 42 deletions(-) diff --git a/arbnode/node.go b/arbnode/node.go index 33aed96584..8660d2e680 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -997,10 +997,6 @@ func (n *Node) GetFinalizedMsgCount(ctx context.Context) (arbutil.MessageIndex, return n.InboxReader.GetFinalizedMsgCount(ctx) } -func (n *Node) BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex, msgResult execution.MessageResult) { - n.TxStreamer.BroadcastMessage(msg, pos, msgResult) -} - func (n *Node) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult execution.MessageResult) error { return n.TxStreamer.WriteMessageFromSequencer(pos, msgWithMeta, msgResult) } diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index e925b60670..f75d1f7ccd 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -456,11 +456,20 @@ func (s *TransactionStreamer) reorg(batch ethdb.Batch, count arbutil.MessageInde s.reorgMutex.Lock() defer s.reorgMutex.Unlock() - err = s.exec.Reorg(count, newMessages, oldMessages) + messagesResults, err := s.exec.Reorg(count, newMessages, oldMessages) if err != nil { return err } + messagesWithBlockHash := make([]broadcaster.MessageWithMetadataAndBlockHash, 0, len(messagesResults)) + for i := 0; i < len(messagesResults); i++ { + messagesWithBlockHash = append(messagesWithBlockHash, broadcaster.MessageWithMetadataAndBlockHash{ + Message: newMessages[i], + BlockHash: &messagesResults[i].BlockHash, + }) + } + s.broadcastMessages(messagesWithBlockHash, count) + if s.validator != nil { err = s.validator.Reorg(s.GetContext(), count) if err != nil { @@ -997,7 +1006,12 @@ func (s *TransactionStreamer) WriteMessageFromSequencer( if err := s.writeMessages(pos, []arbostypes.MessageWithMetadata{msgWithMeta}, nil); err != nil { return err } - s.BroadcastMessage(msgWithMeta, pos, msgResult) + + msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ + Message: msgWithMeta, + BlockHash: &msgResult.BlockHash, + } + s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) return nil } @@ -1027,16 +1041,15 @@ func (s *TransactionStreamer) writeMessage(pos arbutil.MessageIndex, msg arbosty return batch.Put(key, msgBytes) } -func (s *TransactionStreamer) BroadcastMessage( - msg arbostypes.MessageWithMetadata, +func (s *TransactionStreamer) broadcastMessages( + msgs []broadcaster.MessageWithMetadataAndBlockHash, pos arbutil.MessageIndex, - msgResult execution.MessageResult, ) { if s.broadcastServer == nil { return } - if err := s.broadcastServer.BroadcastSingle(msg, pos, &msgResult.BlockHash); err != nil { - log.Error("failed broadcasting message", "pos", pos, "err", err) + if err := s.broadcastServer.BroadcastMessages(msgs, pos); err != nil { + log.Error("failed broadcasting messages", "pos", pos, "err", err) } } @@ -1118,7 +1131,8 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution } msgForPrefetch = msg } - if err = s.exec.DigestMessage(pos, msg, msgForPrefetch); err != nil { + msgResult, err := s.exec.DigestMessage(pos, msg, msgForPrefetch) + if err != nil { logger := log.Warn if 
prevMessageCount < msgCount { logger = log.Debug @@ -1126,6 +1140,13 @@ func (s *TransactionStreamer) ExecuteNextMsg(ctx context.Context, exec execution logger("feedOneMsg failed to send message to execEngine", "err", err, "pos", pos) return false } + + msgWithBlockHash := broadcaster.MessageWithMetadataAndBlockHash{ + Message: *msg, + BlockHash: &msgResult.BlockHash, + } + s.broadcastMessages([]broadcaster.MessageWithMetadataAndBlockHash{msgWithBlockHash}, pos) + return pos+1 < msgCount } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index ca412cce10..d2e959a67d 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -22,6 +22,11 @@ import ( "github.com/offchainlabs/nitro/wsbroadcastserver" ) +type MessageWithMetadataAndBlockHash struct { + Message arbostypes.MessageWithMetadata + BlockHash *common.Hash +} + type Broadcaster struct { server *wsbroadcastserver.WSBroadcastServer backlog backlog.Backlog @@ -84,6 +89,30 @@ func (b *Broadcaster) BroadcastSingle( return nil } +func (b *Broadcaster) BroadcastMessages( + messagesWithBlockHash []MessageWithMetadataAndBlockHash, + seq arbutil.MessageIndex, +) (err error) { + defer func() { + if r := recover(); r != nil { + log.Error("recovered error in BroadcastMessages", "recover", r, "backtrace", string(debug.Stack())) + err = errors.New("panic in BroadcastMessages") + } + }() + var feedMessages []*m.BroadcastFeedMessage + for i, msg := range messagesWithBlockHash { + bfm, err := b.NewBroadcastFeedMessage(msg.Message, seq+arbutil.MessageIndex(i), msg.BlockHash) + if err != nil { + return err + } + feedMessages = append(feedMessages, bfm) + } + + b.BroadcastFeedMessages(feedMessages) + + return nil +} + func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { broadcastFeedMessages := make([]*m.BroadcastFeedMessage, 0, 1) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index b94830ebbc..768b6c311a 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -107,9 +107,9 @@ func (s *ExecutionEngine) GetBatchFetcher() execution.BatchFetcher { return s.consensus } -func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { if count == 0 { - return errors.New("cannot reorg out genesis") + return nil, errors.New("cannot reorg out genesis") } s.createBlocksMutex.Lock() resequencing := false @@ -125,22 +125,25 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost targetBlock := s.bc.GetBlockByNumber(uint64(blockNum)) if targetBlock == nil { log.Warn("reorg target block not found", "block", blockNum) - return nil + return nil, nil } err := s.bc.ReorgToOldBlock(targetBlock) if err != nil { - return err + return nil, err } + + newMessagesResults := make([]*execution.MessageResult, 0, len(oldMessages)) for i := range newMessages { var msgForPrefetch *arbostypes.MessageWithMetadata if i < len(newMessages)-1 { msgForPrefetch = &newMessages[i] } - err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) + msgResult, err := s.digestMessageWithBlockMutex(count+arbutil.MessageIndex(i), &newMessages[i], msgForPrefetch) if err != nil { - return err + return nil, 
err } + newMessagesResults = append(newMessagesResults, msgResult) } if s.recorder != nil { s.recorder.ReorgTo(targetBlock.Header()) @@ -149,7 +152,7 @@ func (s *ExecutionEngine) Reorg(count arbutil.MessageIndex, newMessages []arbost s.resequenceChan <- oldMessages resequencing = true } - return nil + return newMessagesResults, nil } func (s *ExecutionEngine) getCurrentHeader() (*types.Header, error) { @@ -597,25 +600,25 @@ func (s *ExecutionEngine) cacheL1PriceDataOfMsg(num arbutil.MessageIndex, receip // in parallel, creates a block by executing msgForPrefetch (msg+1) against the latest state // but does not store the block. // This helps in filling the cache, so that the next block creation is faster. -func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { if !s.createBlocksMutex.TryLock() { - return errors.New("createBlock mutex held") + return nil, errors.New("createBlock mutex held") } defer s.createBlocksMutex.Unlock() return s.digestMessageWithBlockMutex(num, msg, msgForPrefetch) } -func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { +func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { currentHeader, err := s.getCurrentHeader() if err != nil { - return err + return nil, err } curMsg, err := s.BlockNumberToMessageIndex(currentHeader.Number.Uint64()) if err != nil { - return err + return nil, err } if curMsg+1 != num { - return fmt.Errorf("wrong message number in digest got %d expected %d", num, curMsg+1) + return nil, fmt.Errorf("wrong message number in digest got %d expected %d", num, curMsg+1) } startTime := time.Now() @@ -630,30 +633,23 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, block, statedb, receipts, err := s.createBlockFromNextMessage(msg, false) if err != nil { - return err - } - - if s.consensus != nil { - msgResult := execution.MessageResult{ - BlockHash: block.Hash(), - } - s.consensus.BroadcastMessage(*msg, num, msgResult) + return nil, err } err = s.appendBlock(block, statedb, receipts, time.Since(startTime)) if err != nil { - return err + return nil, err } if time.Now().After(s.nextScheduledVersionCheck) { s.nextScheduledVersionCheck = time.Now().Add(time.Minute) arbState, err := arbosState.OpenSystemArbosState(statedb, nil, true) if err != nil { - return err + return nil, err } version, timestampInt, err := arbState.GetScheduledUpgrade() if err != nil { - return err + return nil, err } var timeUntilUpgrade time.Duration var timestamp time.Time @@ -689,7 +685,12 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, case s.newBlockNotifier <- struct{}{}: default: } - return nil + + msgResult := execution.MessageResult{ + BlockHash: block.Hash(), + } + + return &msgResult, nil } func (s *ExecutionEngine) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 54f9ed6fe1..ae76b88530 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go 
@@ -343,10 +343,10 @@ func (n *ExecutionNode) StopAndWait() { // } } -func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error { +func (n *ExecutionNode) DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*execution.MessageResult, error) { return n.ExecEngine.DigestMessage(num, msg, msgForPrefetch) } -func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error { +func (n *ExecutionNode) Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*execution.MessageResult, error) { return n.ExecEngine.Reorg(count, newMessages, oldMessages) } func (n *ExecutionNode) HeadMessageNumber() (arbutil.MessageIndex, error) { diff --git a/execution/interface.go b/execution/interface.go index ff6c4c7d90..32e2cd7d45 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -28,8 +28,8 @@ var ErrSequencerInsertLockTaken = errors.New("insert lock taken") // always needed type ExecutionClient interface { - DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) error - Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) error + DigestMessage(num arbutil.MessageIndex, msg *arbostypes.MessageWithMetadata, msgForPrefetch *arbostypes.MessageWithMetadata) (*MessageResult, error) + Reorg(count arbutil.MessageIndex, newMessages []arbostypes.MessageWithMetadata, oldMessages []*arbostypes.MessageWithMetadata) ([]*MessageResult, error) HeadMessageNumber() (arbutil.MessageIndex, error) HeadMessageNumberSync(t *testing.T) (arbutil.MessageIndex, error) ResultAtPos(pos arbutil.MessageIndex) (*MessageResult, error) @@ -90,7 +90,6 @@ type ConsensusInfo interface { } type ConsensusSequencer interface { - BroadcastMessage(msg arbostypes.MessageWithMetadata, pos arbutil.MessageIndex, msgResult MessageResult) WriteMessageFromSequencer(pos arbutil.MessageIndex, msgWithMeta arbostypes.MessageWithMetadata, msgResult MessageResult) error ExpectChosenSequencer() error CacheL1PriceDataOfMsg(pos arbutil.MessageIndex, callDataUnits uint64, l1GasCharged uint64) From ddd35d0112eb2e0e5d8d1b4cef2252f691907f8b Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Tue, 30 Apr 2024 12:12:04 +0200 Subject: [PATCH 19/40] Add test to check p256Verify is enabled from arbOS 30 --- arbos/arbosState/arbosstate.go | 4 ++-- system_tests/outbox_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index f7b7f0e7f6..7a6941f0ac 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -163,7 +163,7 @@ var ( ) // Returns a list of precompiles that only appear in Arbitrum chains (i.e. 
ArbOS precompiles) at the genesis block -func getArbitrumOnlyGenesisPrecompiles(chainConfig *params.ChainConfig) []common.Address { +func GetArbitrumOnlyGenesisPrecompiles(chainConfig *params.ChainConfig) []common.Address { rules := chainConfig.Rules(big.NewInt(0), false, 0, chainConfig.ArbitrumChainParams.InitialArbOSVersion) arbPrecompiles := vm.ActivePrecompiles(rules) rules.IsArbitrum = false @@ -204,7 +204,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p // Solidity requires call targets have code, but precompiles don't. // To work around this, we give precompiles fake code. - for _, genesisPrecompile := range getArbitrumOnlyGenesisPrecompiles(chainConfig) { + for _, genesisPrecompile := range GetArbitrumOnlyGenesisPrecompiles(chainConfig) { stateDB.SetCode(genesisPrecompile, []byte{byte(vm.INVALID)}) } diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index d0ca0ccda3..e80c837b58 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -16,6 +16,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" @@ -23,6 +25,37 @@ import ( "github.com/offchainlabs/nitro/util/merkletree" ) +func TestP256VerifyEnabled(t *testing.T) { + gethhook.RequireHookedGeth() + for _, tc := range []struct { + arbOSVersion uint64 + wantP256Verify bool + }{ + { + arbOSVersion: 20, + wantP256Verify: false, + }, + { + arbOSVersion: 30, + wantP256Verify: true, + }, + } { + addresses := arbosState.GetArbitrumOnlyGenesisPrecompiles(¶ms.ChainConfig{ + ArbitrumChainParams: params.ArbitrumChainParams{ + EnableArbOS: true, + InitialArbOSVersion: tc.arbOSVersion, + }, + }) + got := false + for _, a := range addresses { + got = got || (a == common.BytesToAddress([]byte{0x01, 0x00})) + } + if got != tc.wantP256Verify { + t.Errorf("Got P256Verify enabled: %t, want: %t", got, tc.wantP256Verify) + } + } +} + func TestOutboxProofs(t *testing.T) { t.Parallel() gethhook.RequireHookedGeth() From bcb915c7b00bbe25637528101aa4abc5746f25c1 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 30 Apr 2024 10:27:17 -0300 Subject: [PATCH 20/40] TestSeqCoordinatorOutputFeed --- system_tests/seq_coordinator_test.go | 89 ++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 886a0528c7..75a3aefd73 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "net" "testing" "time" @@ -345,3 +346,91 @@ func TestRedisSeqCoordinatorMessageSync(t *testing.T) { func TestRedisSeqCoordinatorWrongKeyMessageSync(t *testing.T) { testCoordinatorMessageSync(t, false) } + +func TestSeqCoordinatorOutputFeed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Init redis + redisUrl := redisutil.CreateTestRedis(ctx, t) + seqAndSeqCoordinatorNodeNames := []string{"stdio://A", "stdio://B"} + initRedisForTest(t, ctx, redisUrl, seqAndSeqCoordinatorNodeNames) + + // build sequencer + builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, true) + builderSeq.nodeConfig.SeqCoordinator.Enable = true + 
builderSeq.nodeConfig.SeqCoordinator.RedisUrl = redisUrl + builderSeq.nodeConfig.SeqCoordinator.MyUrl = seqAndSeqCoordinatorNodeNames[0] + builderSeq.nodeConfig.BatchPoster.Enable = false + cleanupSeq := builderSeq.Build(t) + defer cleanupSeq() + testClientSeq := builderSeq.L2 + + // wait for sequencer to become master + redisClient, err := redisutil.RedisClientFromURL(builderSeq.nodeConfig.SeqCoordinator.RedisUrl) + Require(t, err) + defer redisClient.Close() + for { + err := redisClient.Get(ctx, redisutil.CHOSENSEQ_KEY).Err() + if errors.Is(err, redis.Nil) { + time.Sleep(builderSeq.nodeConfig.SeqCoordinator.UpdateInterval) + continue + } + Require(t, err) + break + } + + builderSeq.L2Info.GenerateAccount("User2") + + // build sequencer coordinator + builderSeqCoordinator := NewNodeBuilder(ctx).DefaultConfig(t, true) + nodeConfigDup := *builderSeq.nodeConfig + builderSeqCoordinator.nodeConfig = &nodeConfigDup + builderSeqCoordinator.nodeConfig.SeqCoordinator.MyUrl = seqAndSeqCoordinatorNodeNames[1] + builderSeqCoordinator.nodeConfig.Feed.Output = *newBroadcasterConfigTest() + cleanupSeqCoordinator := builderSeqCoordinator.Build(t) + defer cleanupSeqCoordinator() + testClientSeqCoordinator := builderSeqCoordinator.L2 + + seqCoordinatorOutputFeedPort := builderSeqCoordinator.L2.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port + + // build sequencer coordinator output feed reader + builderSeqCoordinatorOutputFeedReader := NewNodeBuilder(ctx).DefaultConfig(t, false) + builderSeqCoordinatorOutputFeedReader.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(seqCoordinatorOutputFeedPort) + builderSeqCoordinatorOutputFeedReader.takeOwnership = false + cleanupSeqCoordinatorOutputFeedReader := builderSeqCoordinatorOutputFeedReader.Build(t) + defer cleanupSeqCoordinatorOutputFeedReader() + testClientSeqCoordinatorOutputFeedReader := builderSeqCoordinatorOutputFeedReader.L2 + + // send transaction on the sequencer + tx := builderSeq.L2Info.PrepareTx("Owner", "User2", builderSeq.L2Info.TransferGas, big.NewInt(1e12), nil) + err = builderSeq.L2.Client.SendTransaction(ctx, tx) + Require(t, err) + + // ensure transaction succeeds on the sequencer + _, err = builderSeq.L2.EnsureTxSucceeded(tx) + Require(t, err) + l2balance, err := testClientSeq.Client.BalanceAt(ctx, builderSeq.L2Info.GetAddress("User2"), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + + // ensure transaction succeeds on the sequencer coordinator + _, err = WaitForTx(ctx, testClientSeqCoordinator.Client, tx.Hash(), time.Second*5) + Require(t, err) + l2balance, err = testClientSeqCoordinator.Client.BalanceAt(ctx, builderSeq.L2Info.GetAddress("User2"), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } + + // ensure transaction succeeds on the sequencer coordinator output feed reader + _, err = WaitForTx(ctx, testClientSeqCoordinatorOutputFeedReader.Client, tx.Hash(), time.Second*5) + Require(t, err) + l2balance, err = testClientSeqCoordinatorOutputFeedReader.Client.BalanceAt(ctx, builderSeq.L2Info.GetAddress("User2"), nil) + Require(t, err) + if l2balance.Cmp(big.NewInt(1e12)) != 0 { + t.Fatal("Unexpected balance:", l2balance) + } +} From 873f8391870529894a25951f90154d148c2f199b Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 30 Apr 2024 10:33:51 -0300 Subject: [PATCH 21/40] revert order of funcs --- broadcaster/broadcaster.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 
deletions(-) diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index d2e959a67d..ac5c6c39da 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -89,6 +89,14 @@ func (b *Broadcaster) BroadcastSingle( return nil } +func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { + broadcastFeedMessages := make([]*m.BroadcastFeedMessage, 0, 1) + + broadcastFeedMessages = append(broadcastFeedMessages, bfm) + + b.BroadcastFeedMessages(broadcastFeedMessages) +} + func (b *Broadcaster) BroadcastMessages( messagesWithBlockHash []MessageWithMetadataAndBlockHash, seq arbutil.MessageIndex, @@ -113,14 +121,6 @@ func (b *Broadcaster) BroadcastMessages( return nil } -func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { - broadcastFeedMessages := make([]*m.BroadcastFeedMessage, 0, 1) - - broadcastFeedMessages = append(broadcastFeedMessages, bfm) - - b.BroadcastFeedMessages(broadcastFeedMessages) -} - func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) { bm := &m.BroadcastMessage{ From 086c74a5671d7f3b363927122fd9028a0a14cbbc Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Fri, 3 May 2024 15:54:37 +0200 Subject: [PATCH 22/40] Merge master --- system_tests/outbox_test.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/system_tests/outbox_test.go b/system_tests/outbox_test.go index e80c837b58..739d756a31 100644 --- a/system_tests/outbox_test.go +++ b/system_tests/outbox_test.go @@ -15,9 +15,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" @@ -28,26 +28,20 @@ import ( func TestP256VerifyEnabled(t *testing.T) { gethhook.RequireHookedGeth() for _, tc := range []struct { - arbOSVersion uint64 + stylusEnabled bool wantP256Verify bool }{ { - arbOSVersion: 20, + stylusEnabled: false, wantP256Verify: false, }, { - arbOSVersion: 30, + stylusEnabled: true, wantP256Verify: true, }, } { - addresses := arbosState.GetArbitrumOnlyGenesisPrecompiles(¶ms.ChainConfig{ - ArbitrumChainParams: params.ArbitrumChainParams{ - EnableArbOS: true, - InitialArbOSVersion: tc.arbOSVersion, - }, - }) got := false - for _, a := range addresses { + for _, a := range vm.ActivePrecompiles(params.Rules{IsStylus: tc.stylusEnabled}) { got = got || (a == common.BytesToAddress([]byte{0x01, 0x00})) } if got != tc.wantP256Verify { From ad7776873ba5dcd340a4fcaa2dcbfadf7323c939 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Fri, 3 May 2024 16:15:33 +0200 Subject: [PATCH 23/40] Add e2e test --- system_tests/transfer_test.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index a270cca76b..be091ba5ab 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -4,10 +4,14 @@ package arbtest import ( + "bytes" "context" "fmt" "math/big" "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" ) func TestTransfer(t *testing.T) { @@ -36,3 +40,27 @@ func TestTransfer(t *testing.T) { Fatal(t, "Unexpected recipient balance: 
", bal2) } } + +func TestP256Verify(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = 30 + cleanup := builder.Build(t) + defer cleanup() + addr := common.BytesToAddress([]byte{0x01, 0x00}) + got, err := builder.L2.Client.CallContract(ctx, ethereum.CallMsg{ + From: builder.L2Info.GetAddress("Owner"), + To: &addr, + Gas: builder.L2Info.TransferGas, + Data: common.Hex2Bytes("4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e"), + Value: big.NewInt(1e12), + }, nil) + if err != nil { + t.Fatalf("Calling p256 precompile, unexpected error: %v", err) + } + want := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001") + if !bytes.Equal(got, want) { + t.Errorf("P256Verify() = %v, want: %v", got, want) + } +} From db9c77adfb2b211899f5039907be6a2bfb6634ab Mon Sep 17 00:00:00 2001 From: Pepper Lebeck-Jobe Date: Mon, 6 May 2024 10:54:17 +0200 Subject: [PATCH 24/40] Enable system tests to work from VS Code Debugger. Before this change, when launching some of the system tests from the VS Code debugger UI, the machine loader was unable to locate the jit binary because the system tests don't actually have "test" in path leading up to the test executable. --- validator/server_jit/machine_loader.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/validator/server_jit/machine_loader.go b/validator/server_jit/machine_loader.go index 3a831928b7..b2bdb65322 100644 --- a/validator/server_jit/machine_loader.go +++ b/validator/server_jit/machine_loader.go @@ -27,13 +27,16 @@ var DefaultJitMachineConfig = JitMachineConfig{ func getJitPath() (string, error) { var jitBinary string executable, err := os.Executable() + println("executable: ", executable) if err == nil { - if strings.Contains(filepath.Base(executable), "test") { + if strings.Contains(filepath.Base(executable), "test") || strings.Contains(filepath.Dir(executable), "system_tests") { _, thisfile, _, _ := runtime.Caller(0) projectDir := filepath.Dir(filepath.Dir(filepath.Dir(thisfile))) + println("projectDir: ", projectDir) jitBinary = filepath.Join(projectDir, "target", "bin", "jit") } else { jitBinary = filepath.Join(filepath.Dir(executable), "jit") + println("inside else: ", jitBinary) } _, err = os.Stat(jitBinary) } From ae26127b489ec75235aac8f18593be92945606a7 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Mon, 6 May 2024 14:15:02 +0200 Subject: [PATCH 25/40] Add test-case for arbOS 20 --- system_tests/transfer_test.go | 54 +++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go index be091ba5ab..a49e059351 100644 --- a/system_tests/transfer_test.go +++ b/system_tests/transfer_test.go @@ -44,23 +44,41 @@ func TestTransfer(t *testing.T) { func TestP256Verify(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - builder := NewNodeBuilder(ctx).DefaultConfig(t, false) - builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = 30 - cleanup := builder.Build(t) - defer cleanup() - addr := common.BytesToAddress([]byte{0x01, 0x00}) - got, 
err := builder.L2.Client.CallContract(ctx, ethereum.CallMsg{ - From: builder.L2Info.GetAddress("Owner"), - To: &addr, - Gas: builder.L2Info.TransferGas, - Data: common.Hex2Bytes("4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e"), - Value: big.NewInt(1e12), - }, nil) - if err != nil { - t.Fatalf("Calling p256 precompile, unexpected error: %v", err) - } - want := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001") - if !bytes.Equal(got, want) { - t.Errorf("P256Verify() = %v, want: %v", got, want) + for _, tc := range []struct { + desc string + initialVersion uint64 + want []byte + }{ + { + desc: "p256 should not be enabled on arbOS 20", + initialVersion: 20, + want: nil, + }, + { + desc: "p256 should be enabled on arbOS 20", + initialVersion: 30, + want: common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + builder := NewNodeBuilder(ctx).DefaultConfig(t, false) + builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = tc.initialVersion + cleanup := builder.Build(t) + defer cleanup() + addr := common.BytesToAddress([]byte{0x01, 0x00}) + got, err := builder.L2.Client.CallContract(ctx, ethereum.CallMsg{ + From: builder.L2Info.GetAddress("Owner"), + To: &addr, + Gas: builder.L2Info.TransferGas, + Data: common.Hex2Bytes("4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e"), + Value: big.NewInt(1e12), + }, nil) + if err != nil { + t.Fatalf("CallContract() unexpected error: %v", err) + } + if !bytes.Equal(got, tc.want) { + t.Errorf("P256Verify() = %v, want: %v", got, tc.want) + } + }) } } From 7b9691a6305548849005d344e07597a24307c2f0 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 6 May 2024 18:10:47 -0700 Subject: [PATCH 26/40] Change log-level cli opt to take a string The log-level cli opt on all executables built by this project now takes a string instead of an integer, and the strings are the typical CRIT, ERROR, WARN, INFO, DEBUG, TRACE. Additionally it accepts the old geth numeric log levels as an undocumented feature to avoid the need for anyone already using them to migrate their config. The introduction of slog as geth's logger changed the numeric constants used for the typical levels, so we make this change to avoid requiring any config changes and also to give clearer options going forward. 
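A quick illustration of the behavior described above, before the diff itself. This sketch is not part of the patch: it assumes the ToSlogLevel helper introduced below in cmd/genericconf/loglevel.go, and the throwaway main wrapper exists only for demonstration.

package main

import (
	"fmt"

	"github.com/offchainlabs/nitro/cmd/genericconf"
)

func main() {
	// The documented form: case-insensitive level names.
	for _, s := range []string{"TRACE", "debug", "INFO", "warn", "ERROR", "crit"} {
		lvl, err := genericconf.ToSlogLevel(s)
		fmt.Println(s, "->", lvl, err)
	}
	// The undocumented fallback: legacy geth numeric levels
	// (1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE) are still
	// accepted and mapped through log.FromLegacyLevel.
	lvl, err := genericconf.ToSlogLevel("4")
	fmt.Println("4 ->", lvl, err) // resolves to DEBUG
}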
--- cmd/daserver/daserver.go | 15 +++++++++------ cmd/genericconf/logging.go | 11 ++++++++--- cmd/genericconf/loglevel.go | 38 +++++++++++++++++++++++++++++++++++++ cmd/nitro-val/config.go | 8 ++++---- cmd/nitro-val/nitro_val.go | 5 ++--- cmd/nitro/nitro.go | 11 +++++------ cmd/relay/relay.go | 7 +++++-- relay/relay.go | 8 +++----- 8 files changed, 74 insertions(+), 29 deletions(-) create mode 100644 cmd/genericconf/loglevel.go diff --git a/cmd/daserver/daserver.go b/cmd/daserver/daserver.go index 3e96412648..8036487d26 100644 --- a/cmd/daserver/daserver.go +++ b/cmd/daserver/daserver.go @@ -14,8 +14,6 @@ import ( "syscall" "time" - "golang.org/x/exp/slog" - koanfjson "github.com/knadh/koanf/parsers/json" flag "github.com/spf13/pflag" @@ -46,7 +44,7 @@ type DAServerConfig struct { DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` Conf genericconf.ConfConfig `koanf:"conf"` - LogLevel int `koanf:"log-level"` + LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` @@ -66,7 +64,7 @@ var DefaultDAServerConfig = DAServerConfig{ RESTServerTimeouts: genericconf.HTTPServerTimeoutConfigDefault, DataAvailability: das.DefaultDataAvailabilityConfig, Conf: genericconf.ConfConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -103,7 +101,7 @@ func parseDAServer(args []string) (*DAServerConfig, error) { f.Bool("pprof", DefaultDAServerConfig.PProf, "enable pprof") genericconf.PProfAddOptions("pprof-cfg", f) - f.Int("log-level", int(log.LvlInfo), "log level; 1: ERROR, 2: WARN, 3: INFO, 4: DEBUG, 5: TRACE") + f.String("log-level", DefaultDAServerConfig.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", DefaultDAServerConfig.LogType, "log type (plaintext or json)") das.DataAvailabilityConfigAddDaserverOptions("data-availability", f) @@ -185,13 +183,18 @@ func startup() error { confighelpers.PrintErrorAndExit(errors.New("please specify at least one of --enable-rest or --enable-rpc"), printSampleUsage) } + logLevel, err := genericconf.ToSlogLevel(serverConfig.LogLevel) + if err != nil { + confighelpers.PrintErrorAndExit(err, printSampleUsage) + } + handler, err := genericconf.HandlerFromLogType(serverConfig.LogType, io.Writer(os.Stderr)) if err != nil { flag.Usage() return fmt.Errorf("error parsing log type when creating handler: %w", err) } glogger := log.NewGlogHandler(handler) - glogger.Verbosity(slog.Level(serverConfig.LogLevel)) + glogger.Verbosity(logLevel) log.SetDefault(log.NewLogger(glogger)) if err := startMetrics(serverConfig); err != nil { diff --git a/cmd/genericconf/logging.go b/cmd/genericconf/logging.go index d77071a0bf..fa45953278 100644 --- a/cmd/genericconf/logging.go +++ b/cmd/genericconf/logging.go @@ -9,7 +9,6 @@ import ( "sync" "github.com/ethereum/go-ethereum/log" - "golang.org/x/exp/slog" "gopkg.in/natefinch/lumberjack.v2" ) @@ -90,7 +89,7 @@ func (l *fileLoggerFactory) close() error { } // initLog is not threadsafe -func InitLog(logType string, logLevel slog.Level, fileLoggingConfig *FileLoggingConfig, pathResolver func(string) string) error { +func InitLog(logType string, logLevel string, fileLoggingConfig *FileLoggingConfig, pathResolver func(string) string) error { var glogger *log.GlogHandler // always close previous instance of file logger if err := globalFileLoggerFactory.close(); err != nil { @@ -111,8 +110,14 @@ func InitLog(logType string, logLevel 
slog.Level, fileLoggingConfig *FileLogging flag.Usage() return fmt.Errorf("error parsing log type when creating handler: %w", err) } + slogLevel, err := ToSlogLevel(logLevel) + if err != nil { + flag.Usage() + return fmt.Errorf("error parsing log level: %w", err) + } + glogger = log.NewGlogHandler(handler) - glogger.Verbosity(logLevel) + glogger.Verbosity(slogLevel) log.SetDefault(log.NewLogger(glogger)) return nil } diff --git a/cmd/genericconf/loglevel.go b/cmd/genericconf/loglevel.go new file mode 100644 index 0000000000..f7ad05a2cc --- /dev/null +++ b/cmd/genericconf/loglevel.go @@ -0,0 +1,38 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package genericconf + +import ( + "errors" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/log" + "golang.org/x/exp/slog" +) + +func ToSlogLevel(str string) (slog.Level, error) { + switch strings.ToLower(str) { + case "trace": + return log.LevelTrace, nil + case "debug": + return log.LevelDebug, nil + case "info": + return log.LevelInfo, nil + case "warn": + return log.LevelWarn, nil + case "error": + return log.LevelError, nil + case "crit": + return log.LevelCrit, nil + default: + legacyLevel, err := strconv.Atoi(str) + if err != nil { + // Leave legacy geth numeric log levels undocumented, but if anyone happens + // to be using them, it will work. + return log.LevelTrace, errors.New("invalid log-level") + } + return log.FromLegacyLevel(legacyLevel), nil + } +} diff --git a/cmd/nitro-val/config.go b/cmd/nitro-val/config.go index 51d3978836..b52a1c6b5e 100644 --- a/cmd/nitro-val/config.go +++ b/cmd/nitro-val/config.go @@ -2,10 +2,10 @@ package main import ( "fmt" + "reflect" "time" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/nat" @@ -20,7 +20,7 @@ import ( type ValidationNodeConfig struct { Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"` Validation valnode.Config `koanf:"validation" reload:"hot"` - LogLevel int `koanf:"log-level" reload:"hot"` + LogLevel string `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` Persistent conf.PersistentConfig `koanf:"persistent"` @@ -61,7 +61,7 @@ var IPCConfigDefault = genericconf.IPCConfig{ var ValidationNodeConfigDefault = ValidationNodeConfig{ Conf: genericconf.ConfConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Persistent: conf.PersistentConfigDefault, HTTP: HTTPConfigDefault, @@ -79,7 +79,7 @@ var ValidationNodeConfigDefault = ValidationNodeConfig{ func ValidationNodeConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) valnode.ValidationConfigAddOptions("validation", f) - f.Int("log-level", ValidationNodeConfigDefault.LogLevel, "log level") + f.String("log-level", ValidationNodeConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", ValidationNodeConfigDefault.LogType, "log type (plaintext or json)") genericconf.FileLoggingConfigAddOptions("file-logging", f) conf.PersistentConfigAddOptions("persistent", f) diff --git a/cmd/nitro-val/nitro_val.go b/cmd/nitro-val/nitro_val.go index 4e543f7953..1e894336ea 100644 --- a/cmd/nitro-val/nitro_val.go +++ b/cmd/nitro-val/nitro_val.go @@ -22,7 +22,6 @@ import ( "github.com/offchainlabs/nitro/cmd/util/confighelpers" _ 
"github.com/offchainlabs/nitro/execution/nodeInterface" "github.com/offchainlabs/nitro/validator/valnode" - "golang.org/x/exp/slog" ) func printSampleUsage(name string) { @@ -90,7 +89,7 @@ func mainImpl() int { } } - err = genericconf.InitLog(nodeConfig.LogType, slog.Level(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + err = genericconf.InitLog(nodeConfig.LogType, nodeConfig.LogLevel, &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 @@ -109,7 +108,7 @@ func mainImpl() int { liveNodeConfig := genericconf.NewLiveConfig[*ValidationNodeConfig](args, nodeConfig, ParseNode) liveNodeConfig.SetOnReloadHook(func(oldCfg *ValidationNodeConfig, newCfg *ValidationNodeConfig) error { - return genericconf.InitLog(newCfg.LogType, slog.Level(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + return genericconf.InitLog(newCfg.LogType, newCfg.LogLevel, &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) }) valnode.EnsureValidationExposedViaAuthRPC(&stackConf) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index df0feca8ee..62b13693b8 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -63,7 +63,6 @@ import ( "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/validator/server_common" "github.com/offchainlabs/nitro/validator/valnode" - "golang.org/x/exp/slog" ) func printSampleUsage(name string) { @@ -208,7 +207,7 @@ func mainImpl() int { } stackConf.JWTSecret = filename } - err = genericconf.InitLog(nodeConfig.LogType, slog.Level(nodeConfig.LogLevel), &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) + err = genericconf.InitLog(nodeConfig.LogType, nodeConfig.LogLevel, &nodeConfig.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)) if err != nil { fmt.Fprintf(os.Stderr, "Error initializing logging: %v\n", err) return 1 @@ -600,7 +599,7 @@ func mainImpl() int { } liveNodeConfig.SetOnReloadHook(func(oldCfg *NodeConfig, newCfg *NodeConfig) error { - if err := genericconf.InitLog(newCfg.LogType, slog.Level(newCfg.LogLevel), &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { + if err := genericconf.InitLog(newCfg.LogType, newCfg.LogLevel, &newCfg.FileLogging, pathResolver(nodeConfig.Persistent.LogDir)); err != nil { return fmt.Errorf("failed to re-init logging: %w", err) } return currentNode.OnConfigReload(&oldCfg.Node, &newCfg.Node) @@ -691,7 +690,7 @@ type NodeConfig struct { Validation valnode.Config `koanf:"validation" reload:"hot"` ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"` Chain conf.L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level" reload:"hot"` + LogLevel string `koanf:"log-level" reload:"hot"` LogType string `koanf:"log-type" reload:"hot"` FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"` Persistent conf.PersistentConfig `koanf:"persistent"` @@ -717,7 +716,7 @@ var NodeConfigDefault = NodeConfig{ Validation: valnode.DefaultValidationConfig, ParentChain: conf.L1ConfigDefault, Chain: conf.L2ConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", FileLogging: genericconf.DefaultFileLoggingConfig, Persistent: conf.PersistentConfigDefault, @@ -743,7 +742,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) { valnode.ValidationConfigAddOptions("validation", f) conf.L1ConfigAddOptions("parent-chain", f) 
conf.L2ConfigAddOptions("chain", f) - f.Int("log-level", NodeConfigDefault.LogLevel, "log level") + f.String("log-level", NodeConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", NodeConfigDefault.LogType, "log type (plaintext or json)") genericconf.FileLoggingConfigAddOptions("file-logging", f) conf.PersistentConfigAddOptions("persistent", f) diff --git a/cmd/relay/relay.go b/cmd/relay/relay.go index 5a7499e691..6f786f976a 100644 --- a/cmd/relay/relay.go +++ b/cmd/relay/relay.go @@ -20,7 +20,6 @@ import ( "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/relay" - "golang.org/x/exp/slog" ) func main() { @@ -69,8 +68,12 @@ func startup() error { flag.Usage() return fmt.Errorf("error parsing log type when creating handler: %w", err) } + logLevel, err := genericconf.ToSlogLevel(relayConfig.LogLevel) + if err != nil { + confighelpers.PrintErrorAndExit(err, printSampleUsage) + } glogger := log.NewGlogHandler(handler) - glogger.Verbosity(slog.Level(relayConfig.LogLevel)) + glogger.Verbosity(logLevel) log.SetDefault(log.NewLogger(glogger)) vcsRevision, _, vcsTime := confighelpers.GetVersion() diff --git a/relay/relay.go b/relay/relay.go index 8e29971384..89bb899f29 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -10,8 +10,6 @@ import ( flag "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -120,7 +118,7 @@ func (r *Relay) StopAndWait() { type Config struct { Conf genericconf.ConfConfig `koanf:"conf"` Chain L2Config `koanf:"chain"` - LogLevel int `koanf:"log-level"` + LogLevel string `koanf:"log-level"` LogType string `koanf:"log-type"` Metrics bool `koanf:"metrics"` MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"` @@ -133,7 +131,7 @@ type Config struct { var ConfigDefault = Config{ Conf: genericconf.ConfConfigDefault, Chain: L2ConfigDefault, - LogLevel: int(log.LvlInfo), + LogLevel: "INFO", LogType: "plaintext", Metrics: false, MetricsServer: genericconf.MetricsServerConfigDefault, @@ -146,7 +144,7 @@ var ConfigDefault = Config{ func ConfigAddOptions(f *flag.FlagSet) { genericconf.ConfConfigAddOptions("conf", f) L2ConfigAddOptions("chain", f) - f.Int("log-level", ConfigDefault.LogLevel, "log level") + f.String("log-level", ConfigDefault.LogLevel, "log level, valid values are CRIT, ERROR, WARN, INFO, DEBUG, TRACE") f.String("log-type", ConfigDefault.LogType, "log type") f.Bool("metrics", ConfigDefault.Metrics, "enable metrics") genericconf.MetricsServerAddOptions("metrics-server", f) From 60fb384012826a4ccc39d60eab86e848097f8461 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 7 May 2024 11:15:05 -0300 Subject: [PATCH 27/40] get MessageResult from resultFromHeader instead of building it --- execution/gethexec/executionengine.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 768b6c311a..e8850bb8fc 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -370,11 +370,12 @@ func (s *ExecutionEngine) sequenceTransactionsWithBlockMutex(header *arbostypes. 
Message: msg, DelayedMessagesRead: delayedMessagesRead, } - msgResult := execution.MessageResult{ - BlockHash: block.Hash(), + msgResult, err := s.resultFromHeader(block.Header()) + if err != nil { + return nil, err } - err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, msgResult) + err = s.consensus.WriteMessageFromSequencer(pos, msgWithMeta, *msgResult) if err != nil { return nil, err } @@ -427,11 +428,12 @@ func (s *ExecutionEngine) sequenceDelayedMessageWithBlockMutex(message *arbostyp } blockCalcTime := time.Since(startTime) - msgResult := execution.MessageResult{ - BlockHash: block.Hash(), + msgResult, err := s.resultFromHeader(block.Header()) + if err != nil { + return nil, err } - err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, msgResult) + err = s.consensus.WriteMessageFromSequencer(lastMsg+1, messageWithMeta, *msgResult) if err != nil { return nil, err } From e5504c437b6898d913083a82cc49ce2527fb8d12 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 7 May 2024 11:27:05 -0300 Subject: [PATCH 28/40] tries to get block hash when populating feed backlog --- arbnode/inbox_tracker.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index d1bc8f9ed1..8de8f5d583 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -302,7 +302,14 @@ func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcas if err != nil { return fmt.Errorf("error getting message %v: %w", seqNum, err) } - feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum, nil) + + msgResult, err := t.txStreamer.ResultAtCount(seqNum) + var blockHash *common.Hash + if err == nil { + blockHash = &msgResult.BlockHash + } + + feedMessage, err := broadcastServer.NewBroadcastFeedMessage(*message, seqNum, blockHash) if err != nil { return fmt.Errorf("error creating broadcast feed message %v: %w", seqNum, err) } From 9257595f28f05ee6284d9edd0eb65a956ebcc28a Mon Sep 17 00:00:00 2001 From: Rachel Bousfield Date: Tue, 7 May 2024 09:35:48 -0600 Subject: [PATCH 29/40] code quality improvements --- arbitrator/arbutil/src/evm/api.rs | 2 +- arbitrator/prover/src/binary.rs | 6 +++--- arbitrator/prover/src/machine.rs | 2 +- go-ethereum | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arbitrator/arbutil/src/evm/api.rs b/arbitrator/arbutil/src/evm/api.rs index f84f92ad96..093e7f2984 100644 --- a/arbitrator/arbutil/src/evm/api.rs +++ b/arbitrator/arbutil/src/evm/api.rs @@ -153,7 +153,7 @@ pub trait EvmApi: Send + 'static { ) -> (eyre::Result, u32, u64); /// Returns the EVM return data. - /// Analogous to `vm.RETURNDATA`. + /// Analogous to `vm.RETURNDATACOPY`. fn get_return_data(&self) -> D; /// Emits an EVM log with the given number of topics and data, the first bytes of which should be the topic data. 
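Stepping back to patches 27 and 28 above: both treat the block hash as derived, best-effort metadata rather than something worth failing over. A minimal sketch of the pattern patch 28 applies in PopulateFeedBacklog follows; the helper name and the function signature passed in are assumptions for illustration, not code from the patches:

package arbnode

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/offchainlabs/nitro/arbutil"
	"github.com/offchainlabs/nitro/execution"
)

// bestEffortBlockHash mirrors the logic added to PopulateFeedBacklog: if the
// message result cannot be computed, the feed message simply carries a nil
// block hash instead of aborting the whole backlog population.
func bestEffortBlockHash(
	resultAtCount func(arbutil.MessageIndex) (*execution.MessageResult, error),
	seqNum arbutil.MessageIndex,
) *common.Hash {
	msgResult, err := resultAtCount(seqNum)
	if err != nil {
		return nil // the hash is optional metadata; drop it on error
	}
	return &msgResult.BlockHash
}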
diff --git a/arbitrator/prover/src/binary.rs b/arbitrator/prover/src/binary.rs index 18f9ecec09..f6c3e9fe8f 100644 --- a/arbitrator/prover/src/binary.rs +++ b/arbitrator/prover/src/binary.rs @@ -627,9 +627,9 @@ impl<'a> WasmBinary<'a> { ink_left: ink_left.as_u32(), ink_status: ink_status.as_u32(), depth_left: depth_left.as_u32(), - init_cost: init.try_into()?, - cached_init_cost: cached_init.try_into()?, - asm_estimate: asm_estimate.try_into()?, + init_cost: init.try_into().wrap_err("init cost too high")?, + cached_init_cost: cached_init.try_into().wrap_err("cached cost too high")?, + asm_estimate: asm_estimate.try_into().wrap_err("asm estimate too large")?, footprint, user_main, }) diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index fd7e22e1b2..5466c7f790 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -1729,7 +1729,7 @@ impl Machine { pub fn jump_into_func(&mut self, module: u32, func: u32, mut args: Vec) -> Result<()> { let Some(source_module) = self.modules.get(module as usize) else { - bail!("no module at offest {}", module.red()) + bail!("no module at offset {}", module.red()) }; let Some(source_func) = source_module.funcs.get(func as usize) else { bail!( diff --git a/go-ethereum b/go-ethereum index 72f81daa8c..da519ddc4f 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 72f81daa8c59f044246b6e1f3eca08187edd7417 +Subproject commit da519ddc4fd5113a46da734e41b37369a1dce098 From b03686ff09fbae5ff9e25ac3517711d9657fe487 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Tue, 7 May 2024 13:01:07 -0300 Subject: [PATCH 30/40] fix: not using resultFromHeader in one place --- execution/gethexec/executionengine.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index e8850bb8fc..21735c817a 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -688,11 +688,11 @@ func (s *ExecutionEngine) digestMessageWithBlockMutex(num arbutil.MessageIndex, default: } - msgResult := execution.MessageResult{ - BlockHash: block.Hash(), + msgResult, err := s.resultFromHeader(block.Header()) + if err != nil { + return nil, err } - - return &msgResult, nil + return msgResult, nil } func (s *ExecutionEngine) ArbOSVersionForMessageNumber(messageNum arbutil.MessageIndex) (uint64, error) { From 794c199ad251f1913e0f052159388083005c3fc2 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 7 May 2024 11:19:13 -0700 Subject: [PATCH 31/40] Add extra DataPoster log first time batch is sent --- arbnode/dataposter/data_poster.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 614711249b..6de8013e79 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -844,14 +844,15 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti if err != nil { return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } + var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 if precedingTx != nil && // precedingTx == nil -> the actual preceding tx was already confirmed (precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent) { - latestBlockNumber, err := p.client.BlockNumber(ctx) + latestBlockNumber, err = p.client.BlockNumber(ctx) if err != 
nil { return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - prevBlockNumber := arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantNonce, err := p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) if err != nil { return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } @@ -860,6 +861,8 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) return nil } + } else { + log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) } } From a48123048ed2dbddf672fcfa4a896696571bd48e Mon Sep 17 00:00:00 2001 From: Rachel Bousfield Date: Wed, 8 May 2024 10:27:08 -0600 Subject: [PATCH 32/40] error early when missing asm --- arbos/programs/native.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arbos/programs/native.go b/arbos/programs/native.go index 123dda54ce..09989f3380 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -114,6 +114,11 @@ func callProgram( asm := db.GetActivatedAsm(moduleHash) debug := stylusParams.debugMode + if len(asm) == 0 { + log.Error("missing asm", "program", address, "module", moduleHash) + panic("missing asm") + } + if db, ok := db.(*state.StateDB); ok { db.RecordProgram(moduleHash) } From 819f7501c3189172ee3142e7c655b94b57572bb0 Mon Sep 17 00:00:00 2001 From: Diego Ximenes Date: Wed, 8 May 2024 16:17:21 -0300 Subject: [PATCH 33/40] removes TestSeqCoordinatorOutputFeed --- system_tests/seq_coordinator_test.go | 89 ---------------------------- 1 file changed, 89 deletions(-) diff --git a/system_tests/seq_coordinator_test.go b/system_tests/seq_coordinator_test.go index 75a3aefd73..886a0528c7 100644 --- a/system_tests/seq_coordinator_test.go +++ b/system_tests/seq_coordinator_test.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "math/big" - "net" "testing" "time" @@ -346,91 +345,3 @@ func TestRedisSeqCoordinatorMessageSync(t *testing.T) { func TestRedisSeqCoordinatorWrongKeyMessageSync(t *testing.T) { testCoordinatorMessageSync(t, false) } - -func TestSeqCoordinatorOutputFeed(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Init redis - redisUrl := redisutil.CreateTestRedis(ctx, t) - seqAndSeqCoordinatorNodeNames := []string{"stdio://A", "stdio://B"} - initRedisForTest(t, ctx, redisUrl, seqAndSeqCoordinatorNodeNames) - - // build sequencer - builderSeq := NewNodeBuilder(ctx).DefaultConfig(t, true) - builderSeq.nodeConfig.SeqCoordinator.Enable = true - builderSeq.nodeConfig.SeqCoordinator.RedisUrl = redisUrl - builderSeq.nodeConfig.SeqCoordinator.MyUrl = seqAndSeqCoordinatorNodeNames[0] - builderSeq.nodeConfig.BatchPoster.Enable = false - cleanupSeq := builderSeq.Build(t) - defer cleanupSeq() - testClientSeq := builderSeq.L2 - - // 
wait for sequencer to become master - redisClient, err := redisutil.RedisClientFromURL(builderSeq.nodeConfig.SeqCoordinator.RedisUrl) - Require(t, err) - defer redisClient.Close() - for { - err := redisClient.Get(ctx, redisutil.CHOSENSEQ_KEY).Err() - if errors.Is(err, redis.Nil) { - time.Sleep(builderSeq.nodeConfig.SeqCoordinator.UpdateInterval) - continue - } - Require(t, err) - break - } - - builderSeq.L2Info.GenerateAccount("User2") - - // build sequencer coordinator - builderSeqCoordinator := NewNodeBuilder(ctx).DefaultConfig(t, true) - nodeConfigDup := *builderSeq.nodeConfig - builderSeqCoordinator.nodeConfig = &nodeConfigDup - builderSeqCoordinator.nodeConfig.SeqCoordinator.MyUrl = seqAndSeqCoordinatorNodeNames[1] - builderSeqCoordinator.nodeConfig.Feed.Output = *newBroadcasterConfigTest() - cleanupSeqCoordinator := builderSeqCoordinator.Build(t) - defer cleanupSeqCoordinator() - testClientSeqCoordinator := builderSeqCoordinator.L2 - - seqCoordinatorOutputFeedPort := builderSeqCoordinator.L2.ConsensusNode.BroadcastServer.ListenerAddr().(*net.TCPAddr).Port - - // build sequencer coordinator output feed reader - builderSeqCoordinatorOutputFeedReader := NewNodeBuilder(ctx).DefaultConfig(t, false) - builderSeqCoordinatorOutputFeedReader.nodeConfig.Feed.Input = *newBroadcastClientConfigTest(seqCoordinatorOutputFeedPort) - builderSeqCoordinatorOutputFeedReader.takeOwnership = false - cleanupSeqCoordinatorOutputFeedReader := builderSeqCoordinatorOutputFeedReader.Build(t) - defer cleanupSeqCoordinatorOutputFeedReader() - testClientSeqCoordinatorOutputFeedReader := builderSeqCoordinatorOutputFeedReader.L2 - - // send transaction on the sequencer - tx := builderSeq.L2Info.PrepareTx("Owner", "User2", builderSeq.L2Info.TransferGas, big.NewInt(1e12), nil) - err = builderSeq.L2.Client.SendTransaction(ctx, tx) - Require(t, err) - - // ensure transaction succeeds on the sequencer - _, err = builderSeq.L2.EnsureTxSucceeded(tx) - Require(t, err) - l2balance, err := testClientSeq.Client.BalanceAt(ctx, builderSeq.L2Info.GetAddress("User2"), nil) - Require(t, err) - if l2balance.Cmp(big.NewInt(1e12)) != 0 { - t.Fatal("Unexpected balance:", l2balance) - } - - // ensure transaction succeeds on the sequencer coordinator - _, err = WaitForTx(ctx, testClientSeqCoordinator.Client, tx.Hash(), time.Second*5) - Require(t, err) - l2balance, err = testClientSeqCoordinator.Client.BalanceAt(ctx, builderSeq.L2Info.GetAddress("User2"), nil) - Require(t, err) - if l2balance.Cmp(big.NewInt(1e12)) != 0 { - t.Fatal("Unexpected balance:", l2balance) - } - - // ensure transaction succeeds on the sequencer coordinator output feed reader - _, err = WaitForTx(ctx, testClientSeqCoordinatorOutputFeedReader.Client, tx.Hash(), time.Second*5) - Require(t, err) - l2balance, err = testClientSeqCoordinatorOutputFeedReader.Client.BalanceAt(ctx, builderSeq.L2Info.GetAddress("User2"), nil) - Require(t, err) - if l2balance.Cmp(big.NewInt(1e12)) != 0 { - t.Fatal("Unexpected balance:", l2balance) - } -} From e7e359a9862b84b33c392e7d83310ff5aac0cf97 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Wed, 8 May 2024 13:57:15 -0700 Subject: [PATCH 34/40] More descriptive jit machine accept() errors --- validator/server_jit/jit_machine.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/server_jit/jit_machine.go b/validator/server_jit/jit_machine.go index 8a85aa7115..1a3ccfa340 100644 --- a/validator/server_jit/jit_machine.go +++ b/validator/server_jit/jit_machine.go @@ -98,7 +98,7 @@ func (machine *JitMachine) prove( 
// Wait for the forked process to connect conn, err := tcp.Accept() if err != nil { - return state, err + return state, fmt.Errorf("error waiting for jit machine to connect back to validator: %w", err) } go func() { <-ctx.Done() From 3b05dcb9cb392805c6c8e524d52c97ff88afd89b Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Thu, 9 May 2024 14:47:23 -0500 Subject: [PATCH 35/40] Merge v1.13.12 --- go-ethereum | 2 +- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go-ethereum b/go-ethereum index da519ddc4f..9874ec397a 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit da519ddc4fd5113a46da734e41b37369a1dce098 +Subproject commit 9874ec397a5b499eefc98f7f9ae9632c3fc1e17f diff --git a/go.mod b/go.mod index 22b6b8b4af..6b350a4008 100644 --- a/go.mod +++ b/go.mod @@ -92,7 +92,7 @@ require ( github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect - github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect + github.com/fjl/memsize v0.0.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gammazero/deque v0.2.1 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect diff --git a/go.sum b/go.sum index 9d685c0abc..1b63dfb496 100644 --- a/go.sum +++ b/go.sum @@ -233,8 +233,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= -github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= +github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= From debfb75f382f5af42f90e0ee8e0a28398f2f396c Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 9 May 2024 17:14:34 -0700 Subject: [PATCH 36/40] Fix logging npe --- arbnode/dataposter/data_poster.go | 35 ++++++++++++++++--------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 6de8013e79..b34552a9b9 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -844,25 +844,26 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti if err != nil { return fmt.Errorf("couldn't get preceding tx in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) } - var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 - if precedingTx != nil && // precedingTx == nil -> the actual preceding tx was already confirmed - (precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent) { - latestBlockNumber, err = p.client.BlockNumber(ctx) - if err != nil { - return fmt.Errorf("couldn't get block number in DataPoster to check 
if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) - } - prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) - reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) - if err != nil { - return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) - } + if precedingTx != nil { // precedingTx == nil -> the actual preceding tx was already confirmed + var latestBlockNumber, prevBlockNumber, reorgResistantNonce uint64 + if precedingTx.FullTx.Type() != newTx.FullTx.Type() || !precedingTx.Sent { + latestBlockNumber, err = p.client.BlockNumber(ctx) + if err != nil { + return fmt.Errorf("couldn't get block number in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) + } + prevBlockNumber = arbmath.SaturatingUSub(latestBlockNumber, 1) + reorgResistantNonce, err = p.client.NonceAt(ctx, p.Sender(), new(big.Int).SetUint64(prevBlockNumber)) + if err != nil { + return fmt.Errorf("couldn't determine reorg resistant nonce in DataPoster to check if should send tx with nonce %d: %w", newTx.FullTx.Nonce(), err) + } - if precedingTx.FullTx.Nonce() > reorgResistantNonce { - log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) - return nil + if precedingTx.FullTx.Nonce() > reorgResistantNonce { + log.Info("DataPoster is avoiding creating a mempool nonce gap (the tx remains queued and will be retried)", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent) + return nil + } + } else { + log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) } - } else { - log.Info("DataPoster will send previously unsent batch tx", "nonce", newTx.FullTx.Nonce(), "prevType", precedingTx.FullTx.Type(), "type", newTx.FullTx.Type(), "prevSent", precedingTx.Sent, "latestBlockNumber", latestBlockNumber, "prevBlockNumber", prevBlockNumber, "reorgResistantNonce", reorgResistantNonce) } } From b744328c6b4c01434a528d8e93f53eba15dc75ac Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 10 May 2024 00:51:55 -0500 Subject: [PATCH 37/40] Assume stake is elevated if currentRequiredStake reverts --- staker/l1_validator.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/staker/l1_validator.go b/staker/l1_validator.go index deaf4dc2dc..d68365ede0 100644 --- a/staker/l1_validator.go +++ b/staker/l1_validator.go @@ -12,6 +12,7 @@ import ( "github.com/offchainlabs/nitro/staker/txbuilder" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/validator" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -187,12 +188,16 @@ func (v *L1Validator) resolveNextNode(ctx context.Context, info *StakerInfo, lat func (v *L1Validator) isRequiredStakeElevated(ctx context.Context) (bool, error) { callOpts := v.getCallOpts(ctx) - requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) + baseStake, err := v.rollup.BaseStake(callOpts) if err != nil { return false, 
err } - baseStake, err := v.rollup.BaseStake(callOpts) + requiredStake, err := v.rollup.CurrentRequiredStake(callOpts) if err != nil { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { + log.Warn("execution reverted checking if required stake is elevated; assuming elevated", "err", err) + return true, nil + } return false, err } return requiredStake.Cmp(baseStake) > 0, nil

From 75112329a44848791ba7fc50b9b0b97463a0c59d Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 13 May 2024 09:38:27 -0500 Subject: [PATCH 38/40] Fix zero bid in data poster --- arbnode/dataposter/data_poster.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index b34552a9b9..6b7644c49e 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -592,7 +592,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u targetBlobCost := arbmath.BigMulByUint(newBlobFeeCap, blobGasUsed) targetNonBlobCost := arbmath.BigSub(targetMaxCost, targetBlobCost) newBaseFeeCap := arbmath.BigDivByUint(targetNonBlobCost, gasLimit) - if lastTx != nil && numBlobs > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { + if lastTx != nil && numBlobs > 0 && lastTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBaseFeeCap, lastTx.GasFeeCap()) < minRbfIncrease { // Increase the non-blob fee cap to the minimum rbf increase newBaseFeeCap = arbmath.BigMulByBips(lastTx.GasFeeCap(), minRbfIncrease) newNonBlobCost := arbmath.BigMulByUint(newBaseFeeCap, gasLimit) @@ -665,6 +665,10 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return lastTx.GasFeeCap(), lastTx.GasTipCap(), lastTx.BlobGasFeeCap(), nil } + // Ensure we bid at least 1 wei to prevent division by zero + newBaseFeeCap = arbmath.BigMax(newBaseFeeCap, common.Big1) + newBlobFeeCap = arbmath.BigMax(newBlobFeeCap, common.Big1) + return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } @@ -934,8 +938,8 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa } newTx := *prevTx - if arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease || - (prevTx.FullTx.BlobGasFeeCap() != nil && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { + if (prevTx.FullTx.GasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newFeeCap, prevTx.FullTx.GasFeeCap()) < minRbfIncrease) || + (prevTx.FullTx.BlobGasFeeCap() != nil && prevTx.FullTx.BlobGasFeeCap().Sign() > 0 && arbmath.BigDivToBips(newBlobFeeCap, prevTx.FullTx.BlobGasFeeCap()) < minRbfIncrease) { log.Debug( "no need to replace by fee transaction", "nonce", prevTx.FullTx.Nonce(),

From 4d164e4c8b5be72010da9d370ccf5adfc7837d24 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 13 May 2024 10:04:57 -0500 Subject: [PATCH 39/40] Don't return common.Big1 to prevent mutation issues --- arbnode/dataposter/data_poster.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 6b7644c49e..2b0275c735 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -666,8 +666,12 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u } // Ensure we bid at least 1 wei to prevent division by zero - newBaseFeeCap = arbmath.BigMax(newBaseFeeCap, common.Big1) - newBlobFeeCap = arbmath.BigMax(newBlobFeeCap,
common.Big1) + if newBaseFeeCap.Sign() == 0 { + newBaseFeeCap.SetInt64(1) + } + if newBlobFeeCap.Sign() == 0 { + newBlobFeeCap.SetInt64(1) + } return newBaseFeeCap, newTipCap, newBlobFeeCap, nil } From 9348855a2f900c24069d378c822a9becaba0c0fa Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 13 May 2024 10:13:56 -0500 Subject: [PATCH 40/40] Also don't mutate new*feecap --- arbnode/dataposter/data_poster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 2b0275c735..7bc18a2121 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -667,10 +667,10 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u // Ensure we bid at least 1 wei to prevent division by zero if newBaseFeeCap.Sign() == 0 { - newBaseFeeCap.SetInt64(1) + newBaseFeeCap = big.NewInt(1) } if newBlobFeeCap.Sign() == 0 { - newBlobFeeCap.SetInt64(1) + newBlobFeeCap = big.NewInt(1) } return newBaseFeeCap, newTipCap, newBlobFeeCap, nil
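To close, the aliasing hazard behind patches 39 and 40 in one self-contained sketch (a standalone demo, not code from the series): *big.Int values are mutable, so returning a shared constant such as common.Big1 lets a later in-place operation rewrite that constant for every other user in the process.

package main

import (
	"fmt"
	"math/big"
)

// big1 stands in for a shared package-level constant like geth's common.Big1.
var big1 = big.NewInt(1)

func main() {
	// Hazard: returning the shared pointer lets the caller mutate the "constant".
	fee := big1
	fee.Add(fee, big.NewInt(9)) // in-place: big1 is now 10 everywhere
	fmt.Println(big1)           // prints 10

	// Fix, as patches 39 and 40 do: allocate a fresh value per use.
	big1 = big.NewInt(1) // reset for the demo
	fee = big.NewInt(1)
	fee.Add(fee, big.NewInt(9))
	fmt.Println(big1, fee) // prints 1 10
}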