diff --git a/common/heapmap.go b/common/heapmap.go
new file mode 100644
index 000000000000..90f51e2db25d
--- /dev/null
+++ b/common/heapmap.go
@@ -0,0 +1,90 @@
+package common
+
+type HeapMap[K comparable, T Comparable[T]] struct {
+	h              *Heap[T]
+	m              *ShrinkingMap[K, *HeapElement[T]]
+	keyFromElement func(T) K
+}
+
+func NewHeapMap[K comparable, T Comparable[T]](keyFromElement func(T) K) *HeapMap[K, T] {
+	return &HeapMap[K, T]{
+		h:              NewHeap[T](),
+		m:              NewShrinkingMap[K, *HeapElement[T]](1000),
+		keyFromElement: keyFromElement,
+	}
+}
+
+func (hm *HeapMap[K, T]) Len() int {
+	return hm.h.Len()
+}
+
+func (hm *HeapMap[K, T]) Push(element T) bool {
+	k := hm.keyFromElement(element)
+
+	if hm.m.Has(k) {
+		return false
+	}
+
+	heapElement := hm.h.Push(element)
+	hm.m.Set(k, heapElement)
+
+	return true
+}
+
+func (hm *HeapMap[K, T]) Pop() T {
+	element := hm.h.Pop()
+	k := hm.keyFromElement(element.Value())
+	hm.m.Delete(k)
+
+	return element.Value()
+}
+
+func (hm *HeapMap[K, T]) Peek() T {
+	return hm.h.Peek().Value()
+}
+
+func (hm *HeapMap[K, T]) RemoveByElement(element T) bool {
+	key := hm.keyFromElement(element)
+	heapElement, exists := hm.m.Get(key)
+	if !exists {
+		return false
+	}
+
+	hm.h.Remove(heapElement)
+	hm.m.Delete(key)
+
+	return true
+}
+
+func (hm *HeapMap[K, T]) RemoveByKey(key K) bool {
+	heapElement, exists := hm.m.Get(key)
+	if !exists {
+		return false
+	}
+
+	hm.h.Remove(heapElement)
+	hm.m.Delete(key)
+
+	return true
+}
+
+func (hm *HeapMap[K, T]) Clear() {
+	hm.h.Clear()
+	hm.m = NewShrinkingMap[K, *HeapElement[T]](1000)
+}
+
+func (hm *HeapMap[K, T]) Keys() []K {
+	return hm.m.Keys()
+}
+
+func (hm *HeapMap[K, T]) Elements() []T {
+	var elements []T
+	for _, element := range hm.m.Values() {
+		elements = append(elements, element.Value())
+	}
+	return elements
+}
+
+func (hm *HeapMap[K, T]) Has(element T) bool {
+	return hm.m.Has(hm.keyFromElement(element))
+}
diff --git a/common/shrinkingmap.go b/common/shrinkingmap.go
index 4bf98f87c2da..a62c23a7b6c8 100644
--- a/common/shrinkingmap.go
+++ b/common/shrinkingmap.go
@@ -47,6 +47,22 @@ func (s *ShrinkingMap[K, V]) Delete(key K) (deleted bool) {
 	return true
 }
 
+func (s *ShrinkingMap[K, V]) Keys() []K {
+	var keys []K
+	for k := range s.m {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (s *ShrinkingMap[K, V]) Values() []V {
+	var values []V
+	for _, v := range s.m {
+		values = append(values, v)
+	}
+	return values
+}
+
 func (s *ShrinkingMap[K, V]) Size() (size int) {
 	return len(s.m)
 }
diff --git a/eth/backend.go b/eth/backend.go
index 2b6c663d2744..a119708e52be 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -57,6 +57,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/rlp"
 	"github.com/scroll-tech/go-ethereum/rollup/ccc"
 	"github.com/scroll-tech/go-ethereum/rollup/da_syncer"
+	"github.com/scroll-tech/go-ethereum/rollup/l1"
 	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
 	"github.com/scroll-tech/go-ethereum/rollup/sync_service"
 	"github.com/scroll-tech/go-ethereum/rpc"
@@ -109,7 +110,7 @@ type Ethereum struct {
 // New creates a new Ethereum object (including the
 // initialisation of the common Ethereum object)
-func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthClient) (*Ethereum, error) {
+func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ethereum, error) {
 	// Ensure configuration values are compatible and sane
 	if config.SyncMode == downloader.LightSync {
 		return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
 	}
diff
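A minimal usage sketch of the new HeapMap (illustrative only, not part of the patch): the heap keeps elements ordered via Comparable[T].CompareTo while the ShrinkingMap indexes them by key, so duplicate keys are rejected on Push and elements can be removed by key without scanning the heap. The task type, its fields, and the example function below are hypothetical.

	// Hypothetical element type, ordered by timestamp via Comparable[task].
	type task struct {
		id   string
		time uint64
	}

	func (t task) CompareTo(other task) int {
		switch {
		case t.time < other.time:
			return -1
		case t.time > other.time:
			return 1
		default:
			return 0
		}
	}

	func exampleHeapMapUsage() {
		// the key function maps each element to its unique key
		hm := NewHeapMap[string, task](func(t task) string { return t.id })
		hm.Push(task{id: "a", time: 2})
		hm.Push(task{id: "b", time: 1})
		hm.Push(task{id: "a", time: 3}) // returns false: key "a" already present
		hm.RemoveByKey("a")             // drops "a" via the map index
		next := hm.Pop()                // assuming a min-heap on CompareTo, this is "b"
		_ = next
	}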
--git a/rollup/da_syncer/blob_client/beacon_node_client.go b/rollup/da_syncer/blob_client/beacon_node_client.go index 5bfd7b9edf6c..adb61a4199ff 100644 --- a/rollup/da_syncer/blob_client/beacon_node_client.go +++ b/rollup/da_syncer/blob_client/beacon_node_client.go @@ -12,12 +12,10 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/crypto/kzg4844" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" ) type BeaconNodeClient struct { apiEndpoint string - l1Client *rollup_sync_service.L1Client genesisTime uint64 secondsPerSlot uint64 } @@ -28,7 +26,7 @@ var ( beaconNodeBlobEndpoint = "/eth/v1/beacon/blob_sidecars" ) -func NewBeaconNodeClient(apiEndpoint string, l1Client *rollup_sync_service.L1Client) (*BeaconNodeClient, error) { +func NewBeaconNodeClient(apiEndpoint string) (*BeaconNodeClient, error) { // get genesis time genesisPath, err := url.JoinPath(apiEndpoint, beaconNodeGenesisEndpoint) if err != nil { @@ -94,19 +92,13 @@ func NewBeaconNodeClient(apiEndpoint string, l1Client *rollup_sync_service.L1Cli return &BeaconNodeClient{ apiEndpoint: apiEndpoint, - l1Client: l1Client, genesisTime: genesisTime, secondsPerSlot: secondsPerSlot, }, nil } -func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { - // get block timestamp to calculate slot - header, err := c.l1Client.GetHeaderByNumber(blockNumber) - if err != nil { - return nil, fmt.Errorf("failed to get header by number, err: %w", err) - } - slot := (header.Time - c.genesisTime) / c.secondsPerSlot +func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { + slot := (blockTime - c.genesisTime) / c.secondsPerSlot // get blob sidecar for slot blobSidecarPath, err := url.JoinPath(c.apiEndpoint, beaconNodeBlobEndpoint, fmt.Sprintf("%d", slot)) @@ -156,7 +148,7 @@ func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Cont } } - return nil, fmt.Errorf("missing blob %v in slot %d, block number %d", versionedHash, slot, blockNumber) + return nil, fmt.Errorf("missing blob %v in slot %d", versionedHash, slot) } type GenesisResp struct { diff --git a/rollup/da_syncer/blob_client/blob_client.go b/rollup/da_syncer/blob_client/blob_client.go index 814b1d4faf2d..70635311559f 100644 --- a/rollup/da_syncer/blob_client/blob_client.go +++ b/rollup/da_syncer/blob_client/blob_client.go @@ -17,7 +17,7 @@ const ( ) type BlobClient interface { - GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) + GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) } type BlobClients struct { @@ -32,13 +32,13 @@ func NewBlobClients(blobClients ...BlobClient) *BlobClients { } } -func (c *BlobClients) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlobClients) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { if len(c.list) == 0 { return nil, fmt.Errorf("BlobClients.GetBlobByVersionedHash: list of BlobClients is empty") } for i := 0; i < len(c.list); i++ { - blob, err := c.list[c.curPos].GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, blockNumber) + blob, err := 
c.list[c.curPos].GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, blockTime) if err == nil { return blob, nil } diff --git a/rollup/da_syncer/blob_client/blob_scan_client.go b/rollup/da_syncer/blob_client/blob_scan_client.go index 24b03bed32b9..0185cc9dc96d 100644 --- a/rollup/da_syncer/blob_client/blob_scan_client.go +++ b/rollup/da_syncer/blob_client/blob_scan_client.go @@ -26,7 +26,7 @@ func NewBlobScanClient(apiEndpoint string) *BlobScanClient { } } -func (c *BlobScanClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlobScanClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { // blobscan api docs https://api.blobscan.com/#/blobs/blob-getByBlobId path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) if err != nil { diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go index ddd574d02d10..ef8852a599ee 100644 --- a/rollup/da_syncer/blob_client/block_native_client.go +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -24,7 +24,7 @@ func NewBlockNativeClient(apiEndpoint string) *BlockNativeClient { } } -func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { // blocknative api docs https://docs.blocknative.com/blocknative-data-archive/blob-archive path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) if err != nil { diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index db0f5f01c107..a7489c72c838 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -9,12 +9,10 @@ import ( "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) const ( @@ -35,7 +33,7 @@ var ( type CalldataBlobSource struct { ctx context.Context - l1Client *rollup_sync_service.L1Client + l1Reader *l1.Reader blobClient blob_client.BlobClient l1height uint64 scrollChainABI *abi.ABI @@ -47,14 +45,14 @@ type CalldataBlobSource struct { l1Finalized uint64 } -func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { - scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() +func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Reader, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { + scrollChainABI, err := l1.ScrollChainMetaData.GetAbi() if err != nil { return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } return &CalldataBlobSource{ ctx: ctx, - l1Client: l1Client, + l1Reader: l1Reader, blobClient: blobClient, l1height: l1height, scrollChainABI: scrollChainABI, @@ 
-73,7 +71,7 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { // Otherwise, we know that there's more finalized blocks than we want to request up to // -> no need to query finalized block number if to > ds.l1Finalized { - ds.l1Finalized, err = ds.l1Client.GetLatestFinalizedBlockNumber() + ds.l1Finalized, err = ds.l1Reader.GetLatestFinalizedBlockNumber() if err != nil { return nil, serrors.NewTemporaryError(fmt.Errorf("failed to query GetLatestFinalizedBlockNumber, error: %v", err)) } @@ -85,13 +83,13 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { return nil, ErrSourceExhausted } - logs, err := ds.l1Client.FetchRollupEventsInRange(ds.l1height, to) + rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1height, to) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get events, l1height: %d, error: %v", ds.l1height, err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1height: %d, error: %v", ds.l1height, err)) } - da, err := ds.processLogsToDA(logs) + da, err := ds.processRollupEventsToDA(rollupEvents) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process logs to DA, error: %v", err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process rollup events to DA, error: %v", err)) } ds.l1height = to + 1 @@ -102,48 +100,30 @@ func (ds *CalldataBlobSource) L1Height() uint64 { return ds.l1height } -func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) { +func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries var entry Entry var err error - - for _, vLog := range logs { - switch vLog.Topics[0] { - case ds.l1CommitBatchEventSignature: - event := &rollup_sync_service.L1CommitBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, commitBatchEventName, vLog); err != nil { - return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + for _, rollupEvent := range rollupEvents { + switch rollupEvent.Type() { + case l1.CommitEventType: + commitEvent, ok := rollupEvent.(*l1.CommitBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) } - - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new CommitBatch event", "batch index", batchIndex) - - if entry, err = ds.getCommitBatchDA(batchIndex, &vLog); err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", batchIndex, err) + if entry, err = ds.getCommitBatchDA(commitEvent); err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) } - case ds.l1RevertBatchEventSignature: - event := &rollup_sync_service.L1RevertBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, revertBatchEventName, vLog); err != nil { - return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) - } + case l1.RevertEventType: + entry = NewRevertBatch(rollupEvent.BatchIndex().Uint64()) - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new RevertBatchType event", "batch index", batchIndex) - entry = NewRevertBatch(batchIndex) - - case ds.l1FinalizeBatchEventSignature: - event := &rollup_sync_service.L1FinalizeBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { - return nil, 
fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) - } - - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) - entry = NewFinalizeBatch(batchIndex) + case l1.FinalizeEventType: + entry = NewFinalizeBatch(rollupEvent.BatchIndex().Uint64()) default: - return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + return nil, fmt.Errorf("unknown rollup event, type: %v", rollupEvent.Type()) } entries = append(entries, entry) @@ -151,97 +131,27 @@ func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) return entries, nil } -type commitBatchArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte -} - -func newCommitBatchArgs(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { - var args commitBatchArgs - err := method.Inputs.Copy(&args, values) - return &args, err -} - -func newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { - var args commitBatchWithBlobProofArgs - err := method.Inputs.Copy(&args, values) - if err != nil { - return nil, err - } - return &commitBatchArgs{ - Version: args.Version, - ParentBatchHeader: args.ParentBatchHeader, - Chunks: args.Chunks, - SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, - }, nil -} - -type commitBatchWithBlobProofArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte - BlobDataProof []byte -} - -func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Log) (Entry, error) { - if batchIndex == 0 { +func (ds *CalldataBlobSource) getCommitBatchDA(commitEvent *l1.CommitBatchEvent) (Entry, error) { + if commitEvent.BatchIndex().Uint64() == 0 { return NewCommitBatchDAV0Empty(), nil } - txData, err := ds.l1Client.FetchTxData(vLog) + args, err := ds.l1Reader.FetchCommitTxData(commitEvent) if err != nil { - return nil, fmt.Errorf("failed to fetch tx data, tx hash: %v, err: %w", vLog.TxHash.Hex(), err) - } - if len(txData) < methodIDLength { - return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + return nil, fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", commitEvent.BatchIndex().Uint64(), commitEvent.TxHash().Hex(), err) } - method, err := ds.scrollChainABI.MethodById(txData[:methodIDLength]) - if err != nil { - return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) - } - values, err := method.Inputs.Unpack(txData[methodIDLength:]) + codec, err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version)) if err != nil { - return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) - } - if method.Name == commitBatchMethodName { - args, err := newCommitBatchArgs(method, values) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - codecVersion := encoding.CodecVersion(args.Version) - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) - } - switch args.Version { - case 0: - return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, 
args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) - case 1, 2: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) - } - } else if method.Name == commitBatchWithBlobProofMethodName { - args, err := newCommitBatchArgsFromCommitBatchWithProof(method, values) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - codecVersion := encoding.CodecVersion(args.Version) - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) - } - switch args.Version { - case 3, 4: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) - } + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", args.Version, commitEvent.BatchIndex().Uint64(), err) } - return nil, fmt.Errorf("unknown method name: %s", method.Name) + switch codec.Version() { + case 0: + return NewCommitBatchDAV0(ds.db, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + case 1, 2, 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, ds.l1Reader, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } } diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 135a76d79518..2c4f07869da1 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -10,6 +10,7 @@ import ( "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type CommitBatchDAV0 struct { @@ -25,19 +26,17 @@ type CommitBatchDAV0 struct { func NewCommitBatchDAV0(db ethdb.Database, codec encoding.Codec, - version uint8, - batchIndex uint64, + commitEvent *l1.CommitBatchEvent, parentBatchHeader []byte, chunks [][]byte, skippedL1MessageBitmap []byte, - l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { - return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - return NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, l1BlockNumber) + return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) } func NewCommitBatchDAV0WithChunks(db ethdb.Database, @@ -141,6 +140,7 @@ func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) in func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) 
([]*types.L1MessageTx, error) {
 	var txs []*types.L1MessageTx
+
 	decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped)
 	if err != nil {
 		return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err)
diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go
index 4670eec8bbcb..ccd598a691c4 100644
--- a/rollup/da_syncer/da/commitV1.go
+++ b/rollup/da_syncer/da/commitV1.go
@@ -8,10 +8,9 @@ import (
 	"github.com/scroll-tech/da-codec/encoding"
 
 	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
-	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+	"github.com/scroll-tech/go-ethereum/rollup/l1"
 
 	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
 	"github.com/scroll-tech/go-ethereum/ethdb"
 )
@@ -21,32 +20,34 @@ type CommitBatchDAV1 struct {
 }
 
 func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database,
-	codec encoding.Codec,
-	l1Client *rollup_sync_service.L1Client,
+	l1Reader *l1.Reader,
 	blobClient blob_client.BlobClient,
-	vLog *types.Log,
-	version uint8,
-	batchIndex uint64,
+	codec encoding.Codec,
+	commitEvent *l1.CommitBatchEvent,
 	parentBatchHeader []byte,
 	chunks [][]byte,
 	skippedL1MessageBitmap []byte,
 ) (*CommitBatchDAV1, error) {
 	decodedChunks, err := codec.DecodeDAChunksRawTx(chunks)
 	if err != nil {
-		return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", batchIndex, err)
+		return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", commitEvent.BatchIndex().Uint64(), err)
 	}
 
-	versionedHash, err := l1Client.FetchTxBlobHash(vLog)
+	versionedHash, err := l1Reader.FetchTxBlobHash(commitEvent.TxHash(), commitEvent.BlockHash())
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err)
 	}
 
-	blob, err := blobClient.GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, vLog.BlockNumber)
+	header, err := l1Reader.FetchBlockHeaderByNumber(commitEvent.BlockNumber())
+	if err != nil {
+		return nil, fmt.Errorf("failed to get header by number, err: %w", err)
+	}
+	blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, header.Time)
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err)
 	}
 	if blob == nil {
-		return nil, fmt.Errorf("unexpected, blob == nil and err != nil, batch index: %d, versionedHash: %s, blobClient: %T", batchIndex, versionedHash.String(), blobClient)
+		return nil, fmt.Errorf("unexpected, blob == nil and err == nil, batch index: %d, versionedHash: %s, blobClient: %T", commitEvent.BatchIndex().Uint64(), versionedHash.String(), blobClient)
 	}
 
 	// compute blob versioned hash and compare with one from tx
@@ -69,7 +70,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database,
 		return nil, fmt.Errorf("decodedChunks is nil after decoding")
 	}
 
-	v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber)
+	v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber())
 	if err != nil {
 		return nil, err
 	}
diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go
index 7beab3baea32..048fec6bb3e2 100644
--- a/rollup/da_syncer/data_source.go
+++ b/rollup/da_syncer/data_source.go
@@ -8,7 +8,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type DataSource interface { @@ -19,21 +19,21 @@ type DataSource interface { type DataSourceFactory struct { config Config genesisConfig *params.ChainConfig - l1Client *rollup_sync_service.L1Client + l1Reader *l1.Reader blobClient blob_client.BlobClient db ethdb.Database } -func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory { +func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Reader *l1.Reader, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory { return &DataSourceFactory{ config: config, genesisConfig: genesisConfig, - l1Client: l1Client, + l1Reader: l1Reader, blobClient: blobClient, db: db, } } func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) { - return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) + return da.NewCalldataBlobSource(ctx, l1height, ds.l1Reader, ds.blobClient, ds.db) } diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go index 27eaf20cb38a..6ed84fe85186 100644 --- a/rollup/da_syncer/syncing_pipeline.go +++ b/rollup/da_syncer/syncing_pipeline.go @@ -15,8 +15,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - "github.com/scroll-tech/go-ethereum/rollup/sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) // Config is the configuration parameters of data availability syncing. 
@@ -42,20 +41,18 @@ type SyncingPipeline struct { daSyncer *DASyncer } -func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient sync_service.EthClient, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { - scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() +func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient l1.Client, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { + l1Reader, err := l1.NewReader(ctx, l1.Config{ + ScrollChainAddress: genesisConfig.Scroll.L1Config.ScrollChainAddress, + L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress, + }, ethClient) if err != nil { - return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) - } - - l1Client, err := rollup_sync_service.NewL1Client(ctx, ethClient, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) - if err != nil { - return nil, err + return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err) } blobClientList := blob_client.NewBlobClients() if config.BeaconNodeAPIEndpoint != "" { - beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint, l1Client) + beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint) if err != nil { log.Warn("failed to create BeaconNodeClient", "err", err) } else { @@ -72,7 +69,7 @@ func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesi return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") } - dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Client, blobClientList, db) + dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Reader, blobClientList, db) syncedL1Height := l1DeploymentBlock - 1 from := rawdb.ReadDASyncedL1BlockNumber(db) if from != nil { diff --git a/rollup/l1/abi.go b/rollup/l1/abi.go new file mode 100644 index 000000000000..c16123aa5e8b --- /dev/null +++ b/rollup/l1/abi.go @@ -0,0 +1,245 @@ +package l1 + +import ( + "fmt" + "math/big" + + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/accounts/abi/bind" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +var ( + // ScrollChainABI holds information about ScrollChain's context and available invokable methods. + ScrollChainABI *abi.ABI + // L1MessageQueueABIManual holds information about L1MessageQueue's context and available invokable methods. + L1MessageQueueABIManual *abi.ABI +) + +func init() { + ScrollChainABI, _ = ScrollChainMetaData.GetAbi() + L1MessageQueueABIManual, _ = L1MessageQueueMetaDataManual.GetAbi() +} + +// ScrollChainMetaData contains ABI of the ScrollChain contract. 
+var ScrollChainMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", +} + +// L1MessageQueueMetaDataManual contains all meta data concerning the L1MessageQueue contract. +var L1MessageQueueMetaDataManual = &bind.MetaData{ + ABI: "[{\"type\":\"constructor\",\"inputs\":[{\"name\":\"_messenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_scrollChain\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_enforcedTxGateway\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"appendCrossDomainMessage\",\"inputs\":[{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"appendEnforcedTransaction\",\"inputs\":[{\"name\":\"_sender\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"calculateIntrinsicGasFee\",\"inputs\":[{\"name\":\"_calldata\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"computeTransactionHash\",\"inputs\":[{\"name\":\"_sender\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_value\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"dropCrossDomainMessage\",\"inputs\":[{\"name\":\"_index\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"enforcedTxGateway\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"estimateCrossDomainMessageFee\",\"inputs\":[{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"finalizePoppedCrossDomainMessage\",\"inputs\":[{\"name\":\"_newFinalizedQueueIndexPlusOne\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"gas
Oracle\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getCrossDomainMessage\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_messenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_scrollChain\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_enforcedTxGateway\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasOracle\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_maxGasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"isMessageDropped\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"isMessageSkipped\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"maxGasLimit\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"messageQueue\",\"inputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"messenger\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextCrossDomainMessageIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextUnfinalizedQueueIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"pendingQueueIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"popCrossDomainMessage\",\"inputs\":[{\"name\":\"_startIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_count\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_skippedBitmap\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"resetPoppedCrossDomainMessage\",\"inputs\":[{\"name\":\"_startIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"scrollChain\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function
\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateGasOracle\",\"inputs\":[{\"name\":\"_newGasOracle\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateMaxGasLimit\",\"inputs\":[{\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"DequeueTransaction\",\"inputs\":[{\"name\":\"startIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"count\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"skippedBitmap\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"DropTransaction\",\"inputs\":[{\"name\":\"index\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"FinalizedDequeuedTransaction\",\"inputs\":[{\"name\":\"finalizedIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint8\",\"indexed\":false,\"internalType\":\"uint8\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"QueueTransaction\",\"inputs\":[{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"target\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"value\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"queueIndex\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"},{\"name\":\"gasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ResetDequeuedTransaction\",\"inputs\":[{\"name\":\"startIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"UpdateGasOracle\",\"inputs\":[{\"name\":\"_oldGasOracle\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"_newGasOracle\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"UpdateMaxGasLimit\",\"inputs\":[{\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"ErrorZeroAddress\",\"inputs\":[]}]", +} + +const ( + // CommitEventType contains data of event of commit batch + CommitEventType int = iota + // RevertEventType contains data of event of revert batch + RevertEventType + // FinalizeEventType contains data of event of finalize batch + FinalizeEventType + + commitBatchMethodName = "commitBatch" + commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" + + // the 
length of method ID at the beginning of transaction data + methodIDLength = 4 +) + +// RollupEvent represents a single rollup event (commit, revert, finalize) +type RollupEvent interface { + Type() int + BatchIndex() *big.Int + BatchHash() common.Hash + TxHash() common.Hash + BlockHash() common.Hash + BlockNumber() uint64 +} + +type RollupEvents []RollupEvent + +// CommitBatchEventUnpacked represents a CommitBatch event raised by the ScrollChain contract. +type CommitBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash +} + +// CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract with additional fields. +type CommitBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (c *CommitBatchEvent) Type() int { + return CommitEventType +} + +func (c *CommitBatchEvent) BatchIndex() *big.Int { + return c.batchIndex +} + +func (c *CommitBatchEvent) BatchHash() common.Hash { + return c.batchHash +} + +func (c *CommitBatchEvent) TxHash() common.Hash { + return c.txHash +} + +func (c *CommitBatchEvent) BlockHash() common.Hash { + return c.blockHash +} + +func (c *CommitBatchEvent) BlockNumber() uint64 { + return c.blockNumber +} + +func (c *CommitBatchEvent) CompareTo(other *CommitBatchEvent) int { + return c.batchIndex.Cmp(other.batchIndex) +} + +type RevertBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash +} + +// RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract. +type RevertBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (r *RevertBatchEvent) BlockNumber() uint64 { + return r.blockNumber +} + +func (r *RevertBatchEvent) BlockHash() common.Hash { + return r.blockHash +} + +func (r *RevertBatchEvent) TxHash() common.Hash { + return r.txHash +} + +func (r *RevertBatchEvent) Type() int { + return RevertEventType +} + +func (r *RevertBatchEvent) BatchIndex() *big.Int { + return r.batchIndex +} + +func (r *RevertBatchEvent) BatchHash() common.Hash { + return r.batchHash +} + +type FinalizeBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash + StateRoot common.Hash + WithdrawRoot common.Hash +} + +// FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract. +type FinalizeBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + stateRoot common.Hash + withdrawRoot common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (f *FinalizeBatchEvent) TxHash() common.Hash { + return f.txHash +} + +func (f *FinalizeBatchEvent) BlockHash() common.Hash { + return f.blockHash +} + +func (f *FinalizeBatchEvent) BlockNumber() uint64 { + return f.blockNumber +} + +func (f *FinalizeBatchEvent) Type() int { + return FinalizeEventType +} + +func (f *FinalizeBatchEvent) BatchIndex() *big.Int { + return f.batchIndex +} + +func (f *FinalizeBatchEvent) BatchHash() common.Hash { + return f.batchHash +} + +func (f *FinalizeBatchEvent) StateRoot() common.Hash { + return f.stateRoot +} + +func (f *FinalizeBatchEvent) WithdrawRoot() common.Hash { + return f.withdrawRoot +} + +// UnpackLog unpacks a retrieved log into the provided output structure. 
+func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error { + if log.Topics[0] != c.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } + if len(log.Data) > 0 { + if err := c.UnpackIntoInterface(out, event, log.Data); err != nil { + return err + } + } + var indexed abi.Arguments + for _, arg := range c.Events[event].Inputs { + if arg.Indexed { + indexed = append(indexed, arg) + } + } + return abi.ParseTopics(out, indexed, log.Topics[1:]) +} + +type CommitBatchArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte +} + +func newCommitBatchArgs(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { + var args CommitBatchArgs + err := method.Inputs.Copy(&args, values) + return &args, err +} + +func newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { + var args commitBatchWithBlobProofArgs + err := method.Inputs.Copy(&args, values) + if err != nil { + return nil, err + } + return &CommitBatchArgs{ + Version: args.Version, + ParentBatchHeader: args.ParentBatchHeader, + Chunks: args.Chunks, + SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, + }, nil +} + +type commitBatchWithBlobProofArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte + BlobDataProof []byte +} diff --git a/rollup/l1/abi_test.go b/rollup/l1/abi_test.go new file mode 100644 index 000000000000..ab4c9d473a16 --- /dev/null +++ b/rollup/l1/abi_test.go @@ -0,0 +1,82 @@ +package l1 + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/crypto" +) + +func TestEventSignatures(t *testing.T) { + scrollChainABI, err := ScrollChainMetaData.GetAbi() + if err != nil { + t.Fatal("failed to get scroll chain abi", "err", err) + } + + assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) +} + +func TestUnpackLog(t *testing.T) { + scrollChainABI, err := ScrollChainMetaData.GetAbi() + require.NoError(t, err) + + mockBatchIndex := big.NewInt(123) + mockBatchHash := crypto.Keccak256Hash([]byte("mockBatch")) + mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) + mockWithdrawRoot := crypto.Keccak256Hash([]byte("mockWithdrawRoot")) + + tests := []struct { + eventName string + mockLog types.Log + expected interface{} + out interface{} + }{ + { + "CommitBatch", + types.Log{ + Data: []byte{}, + Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &CommitBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, + &CommitBatchEvent{}, + }, + { + "RevertBatch", + types.Log{ + Data: []byte{}, + Topics: []common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &RevertBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, + &RevertBatchEvent{}, + }, + { + "FinalizeBatch", + types.Log{ + Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), + Topics: 
[]common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &FinalizeBatchEvent{ + batchIndex: mockBatchIndex, + batchHash: mockBatchHash, + stateRoot: mockStateRoot, + withdrawRoot: mockWithdrawRoot, + }, + &FinalizeBatchEvent{}, + }, + } + + for _, tt := range tests { + t.Run(tt.eventName, func(t *testing.T) { + err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) + assert.NoError(t, err) + assert.Equal(t, tt.expected, tt.out) + }) + } +} diff --git a/rollup/l1/l1msg_bindings.go b/rollup/l1/l1msg_bindings.go new file mode 100644 index 000000000000..679623818423 --- /dev/null +++ b/rollup/l1/l1msg_bindings.go @@ -0,0 +1,150 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +// generated using: +// forge flatten src/L1/rollup/L1MessageQueue.sol > flatten.sol +// go run github.com/scroll-tech/go-ethereum/cmd/abigen@develop --sol flatten.sol --pkg rollup --out ./L1MessageQueue.go --contract L1MessageQueue + +package l1 + +import ( + "math/big" + "strings" + + ethereum "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/accounts/abi/bind" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +// L1MessageQueueMetaData contains all meta data concerning the L1MessageQueue contract. +var L1MessageQueueMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"DequeueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"queueIndex\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"QueueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGateway\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"UpdateEnforcedTxGateway\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGasOracle\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"UpdateGasOracle\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\
"name\":\"UpdateMaxGasLimit\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendEnforcedTransaction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_calldata\",\"type\":\"bytes\"}],\"name\":\"calculateIntrinsicGasFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"computeTransactionHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enforcedTxGateway\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"gasOracle\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"}],\"name\":\"getCrossDomainMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_scrollChain\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_enforcedTxGateway\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_gasOracle\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_maxGasLimit\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\
":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextCrossDomainMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingQueueIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_count\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"popCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scrollChain\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"updateEnforcedTxGateway\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"updateGasOracle\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\"name\":\"updateMaxGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// L1MessageQueueABI is the input ABI used to generate the binding from. +// Deprecated: Use L1MessageQueueMetaData.ABI instead. +var L1MessageQueueABI = L1MessageQueueMetaData.ABI + +// L1MessageQueueFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type L1MessageQueueFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// NewL1MessageQueueFilterer creates a new log filterer instance of L1MessageQueue, bound to a specific deployed contract. +func NewL1MessageQueueFilterer(address common.Address, filterer bind.ContractFilterer) (*L1MessageQueueFilterer, error) { + contract, err := bindL1MessageQueue(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &L1MessageQueueFilterer{contract: contract}, nil +} + +// bindL1MessageQueue binds a generic wrapper to an already deployed contract. 
+func bindL1MessageQueue(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(L1MessageQueueABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// L1MessageQueueQueueTransactionIterator is returned from FilterQueueTransaction and is used to iterate over the raw logs and unpacked data for QueueTransaction events raised by the L1MessageQueue contract. +type L1MessageQueueQueueTransactionIterator struct { + Event *L1MessageQueueQueueTransaction // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *L1MessageQueueQueueTransactionIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(L1MessageQueueQueueTransaction) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(L1MessageQueueQueueTransaction) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *L1MessageQueueQueueTransactionIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *L1MessageQueueQueueTransactionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// L1MessageQueueQueueTransaction represents a QueueTransaction event raised by the L1MessageQueue contract. +type L1MessageQueueQueueTransaction struct { + Sender common.Address + Target common.Address + Value *big.Int + QueueIndex uint64 + GasLimit *big.Int + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterQueueTransaction is a free log retrieval operation binding the contract event 0x69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e. 
+// +// Solidity: event QueueTransaction(address indexed sender, address indexed target, uint256 value, uint64 queueIndex, uint256 gasLimit, bytes data) +func (_L1MessageQueue *L1MessageQueueFilterer) FilterQueueTransaction(opts *bind.FilterOpts, sender []common.Address, target []common.Address) (*L1MessageQueueQueueTransactionIterator, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var targetRule []interface{} + for _, targetItem := range target { + targetRule = append(targetRule, targetItem) + } + + logs, sub, err := _L1MessageQueue.contract.FilterLogs(opts, "QueueTransaction", senderRule, targetRule) + if err != nil { + return nil, err + } + return &L1MessageQueueQueueTransactionIterator{contract: _L1MessageQueue.contract, event: "QueueTransaction", logs: logs, sub: sub}, nil +} diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go new file mode 100644 index 000000000000..cc06296b657e --- /dev/null +++ b/rollup/l1/reader.go @@ -0,0 +1,381 @@ +package l1 + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rpc" +) + +const ( + commitBatchEventName = "CommitBatch" + revertBatchEventName = "RevertBatch" + finalizeBatchEventName = "FinalizeBatch" + nextUnfinalizedQueueIndex = "nextUnfinalizedQueueIndex" + lastFinalizedBatchIndex = "lastFinalizedBatchIndex" + + defaultL1MsgFetchBlockRange = 500 + defaultRollupEventsFetchBlockRange = 100 +) + +type Reader struct { + ctx context.Context + config Config + client Client + + scrollChainABI *abi.ABI + l1MessageQueueABI *abi.ABI + l1CommitBatchEventSignature common.Hash + l1RevertBatchEventSignature common.Hash + l1FinalizeBatchEventSignature common.Hash +} + +// Config is the configuration parameters of data availability syncing. 
+type Config struct { + ScrollChainAddress common.Address // address of ScrollChain contract + L1MessageQueueAddress common.Address // address of L1MessageQueue contract +} + +// NewReader initializes a new Reader instance +func NewReader(ctx context.Context, config Config, l1Client Client) (*Reader, error) { + if config.ScrollChainAddress == (common.Address{}) { + return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") + } + + if config.L1MessageQueueAddress == (common.Address{}) { + return nil, errors.New("must pass non-zero l1MessageQueueAddress to L1Client") + } + + reader := Reader{ + ctx: ctx, + config: config, + client: l1Client, + + scrollChainABI: ScrollChainABI, + l1MessageQueueABI: L1MessageQueueABIManual, + l1CommitBatchEventSignature: ScrollChainABI.Events[commitBatchEventName].ID, + l1RevertBatchEventSignature: ScrollChainABI.Events[revertBatchEventName].ID, + l1FinalizeBatchEventSignature: ScrollChainABI.Events[finalizeBatchEventName].ID, + } + + return &reader, nil +} + +func (r *Reader) FinalizedL1MessageQueueIndex(blockNumber uint64) (uint64, error) { + data, err := r.l1MessageQueueABI.Pack(nextUnfinalizedQueueIndex) + if err != nil { + return 0, fmt.Errorf("failed to pack %s: %w", nextUnfinalizedQueueIndex, err) + } + + result, err := r.client.CallContract(r.ctx, ethereum.CallMsg{ + To: &r.config.L1MessageQueueAddress, + Data: data, + }, new(big.Int).SetUint64(blockNumber)) + if err != nil { + return 0, fmt.Errorf("failed to call %s: %w", nextUnfinalizedQueueIndex, err) + } + + var parsedResult *big.Int + if err = r.l1MessageQueueABI.UnpackIntoInterface(&parsedResult, nextUnfinalizedQueueIndex, result); err != nil { + return 0, fmt.Errorf("failed to unpack result: %w", err) + } + + next := parsedResult.Uint64() + if next == 0 { + return 0, nil + } + + return next - 1, nil +} + +func (r *Reader) LatestFinalizedBatch(blockNumber uint64) (uint64, error) { + data, err := r.scrollChainABI.Pack(lastFinalizedBatchIndex) + if err != nil { + return 0, fmt.Errorf("failed to pack %s: %w", lastFinalizedBatchIndex, err) + } + + result, err := r.client.CallContract(r.ctx, ethereum.CallMsg{ + To: &r.config.ScrollChainAddress, + Data: data, + }, new(big.Int).SetUint64(blockNumber)) + if err != nil { + return 0, fmt.Errorf("failed to call %s: %w", lastFinalizedBatchIndex, err) + } + + var parsedResult *big.Int + if err = r.scrollChainABI.UnpackIntoInterface(&parsedResult, lastFinalizedBatchIndex, result); err != nil { + return 0, fmt.Errorf("failed to unpack result: %w", err) + } + + return parsedResult.Uint64(), nil +} + +// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. 
+func (r *Reader) GetLatestFinalizedBlockNumber() (uint64, error) { + header, err := r.client.HeaderByNumber(r.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) + if err != nil { + return 0, err + } + if !header.Number.IsInt64() { + return 0, fmt.Errorf("received unexpected block number in L1Client: %v", header.Number) + } + return header.Number.Uint64(), nil +} + +// FetchBlockHeaderByNumber fetches the block header by number +func (r *Reader) FetchBlockHeaderByNumber(blockNumber uint64) (*types.Header, error) { + return r.client.HeaderByNumber(r.ctx, big.NewInt(int64(blockNumber))) +} + +// FetchTxData fetches tx data corresponding to given event log +func (r *Reader) FetchTxData(txHash, blockHash common.Hash) ([]byte, error) { + tx, err := r.fetchTx(txHash, blockHash) + if err != nil { + return nil, err + } + return tx.Data(), nil +} + +// FetchTxBlobHash fetches tx blob hash corresponding to given event log +func (r *Reader) FetchTxBlobHash(txHash, blockHash common.Hash) (common.Hash, error) { + tx, err := r.fetchTx(txHash, blockHash) + if err != nil { + return common.Hash{}, err + } + blobHashes := tx.BlobHashes() + if len(blobHashes) == 0 { + return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) + } + return blobHashes[0], nil +} + +// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. +func (r *Reader) FetchRollupEventsInRange(from, to uint64) (RollupEvents, error) { + log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) + var logs []types.Log + + err := queryInBatches(r.ctx, from, to, defaultRollupEventsFetchBlockRange, func(from, to uint64) (bool, error) { + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(from)), // inclusive + ToBlock: big.NewInt(int64(to)), // inclusive + Addresses: []common.Address{ + r.config.ScrollChainAddress, + }, + Topics: make([][]common.Hash, 1), + } + query.Topics[0] = make([]common.Hash, 3) + query.Topics[0][0] = r.l1CommitBatchEventSignature + query.Topics[0][1] = r.l1RevertBatchEventSignature + query.Topics[0][2] = r.l1FinalizeBatchEventSignature + + logsBatch, err := r.client.FilterLogs(r.ctx, query) + if err != nil { + return false, fmt.Errorf("failed to filter logs, err: %w", err) + } + logs = append(logs, logsBatch...) + return true, nil + }) + if err != nil { + return nil, err + } + return r.processLogsToRollupEvents(logs) +} + +// FetchRollupEventsInRangeWithCallback retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. 
+func (r *Reader) FetchRollupEventsInRangeWithCallback(from, to uint64, callback func(event RollupEvent) bool) error { + log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) + + err := queryInBatches(r.ctx, from, to, defaultRollupEventsFetchBlockRange, func(from, to uint64) (bool, error) { + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(from)), // inclusive + ToBlock: big.NewInt(int64(to)), // inclusive + Addresses: []common.Address{ + r.config.ScrollChainAddress, + }, + Topics: make([][]common.Hash, 1), + } + query.Topics[0] = make([]common.Hash, 3) + query.Topics[0][0] = r.l1CommitBatchEventSignature + query.Topics[0][1] = r.l1RevertBatchEventSignature + query.Topics[0][2] = r.l1FinalizeBatchEventSignature + + logsBatch, err := r.client.FilterLogs(r.ctx, query) + if err != nil { + return false, fmt.Errorf("failed to filter logs, err: %w", err) + } + + rollupEvents, err := r.processLogsToRollupEvents(logsBatch) + if err != nil { + return false, fmt.Errorf("failed to process logs to rollup events, err: %w", err) + } + + for _, event := range rollupEvents { + if !callback(event) { + return false, nil + } + } + + return true, nil + }) + if err != nil { + return err + } + + return nil +} + +func (r *Reader) processLogsToRollupEvents(logs []types.Log) (RollupEvents, error) { + var rollupEvents RollupEvents + var rollupEvent RollupEvent + var err error + + for _, vLog := range logs { + switch vLog.Topics[0] { + case r.l1CommitBatchEventSignature: + event := &CommitBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, commitBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + } + log.Trace("found new CommitBatch event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &CommitBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + case r.l1RevertBatchEventSignature: + event := &RevertBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, revertBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) + } + log.Trace("found new RevertBatchType event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &RevertBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + case r.l1FinalizeBatchEventSignature: + event := &FinalizeBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) + } + log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &FinalizeBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + stateRoot: event.StateRoot, + withdrawRoot: event.WithdrawRoot, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + default: + return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + } + + rollupEvents = append(rollupEvents, rollupEvent) + } + return rollupEvents, nil +} + +func queryInBatches(ctx context.Context, fromBlock, toBlock uint64, batchSize uint64, queryFunc func(from, to uint64) (bool, error)) error { + for from := fromBlock; from <= toBlock; from += batchSize { + // 
check if context is done and return if it is + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + to := from + batchSize - 1 + if to > toBlock { + to = toBlock + } + cont, err := queryFunc(from, to) + if err != nil { + return fmt.Errorf("error querying blocks %d to %d: %w", from, to, err) + } + if !cont { + break + } + } + return nil +} + +// fetchTx fetches tx corresponding to given event log +func (r *Reader) fetchTx(txHash, blockHash common.Hash) (*types.Transaction, error) { + tx, _, err := r.client.TransactionByHash(r.ctx, txHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", txHash.Hex(), "block hash", blockHash.Hex(), "err", err) + block, err := r.client.BlockByHash(r.ctx, blockHash) + if err != nil { + return nil, fmt.Errorf("failed to get block by hash, block hash: %v, err: %w", blockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == txHash { + tx = txInBlock + found = true + break + } + } + if !found { + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block hash: %v", txHash.Hex(), blockHash.Hex()) + } + } + + return tx, nil +} + +func (r *Reader) FetchCommitTxData(commitEvent *CommitBatchEvent) (*CommitBatchArgs, error) { + tx, err := r.fetchTx(commitEvent.TxHash(), commitEvent.BlockHash()) + if err != nil { + return nil, err + } + txData := tx.Data() + + if len(txData) < methodIDLength { + return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + } + + method, err := r.scrollChainABI.MethodById(txData[:methodIDLength]) + if err != nil { + return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) + } + values, err := method.Inputs.Unpack(txData[methodIDLength:]) + if err != nil { + return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) + } + + var args *CommitBatchArgs + if method.Name == commitBatchMethodName { + args, err = newCommitBatchArgs(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args %s, values: %+v, err: %w", commitBatchMethodName, values, err) + } + } else if method.Name == commitBatchWithBlobProofMethodName { + args, err = newCommitBatchArgsFromCommitBatchWithProof(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args %s, values: %+v, err: %w", commitBatchWithBlobProofMethodName, values, err) + } + } else { + return nil, fmt.Errorf("unknown method name for commit transaction: %s", method.Name) + } + + return args, nil +} diff --git a/rollup/l1/reader_test.go b/rollup/l1/reader_test.go new file mode 100644 index 000000000000..5f4a2c95817a --- /dev/null +++ b/rollup/l1/reader_test.go @@ -0,0 +1,125 @@ +package l1 + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestQueryInBatches(t *testing.T) { + tests := []struct { + name string + fromBlock uint64 + toBlock uint64 + batchSize uint64 + queryFunc func(from, to uint64) (bool, error) + expectErr bool + expectedErr string + expectedCalls []struct { + from uint64 + to uint64 + } + }{ + { + name: "Successful query in single batch", + fromBlock: 1, + toBlock: 10, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return true, nil + }, + expectErr: false, 
+ expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + { + name: "Successful query in multiple batches", + fromBlock: 1, + toBlock: 80, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return true, nil + }, + expectErr: false, + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + {from: 11, to: 20}, + {from: 21, to: 30}, + {from: 31, to: 40}, + {from: 41, to: 50}, + {from: 51, to: 60}, + {from: 61, to: 70}, + {from: 71, to: 80}, + }, + }, + { + name: "Query function returns error", + fromBlock: 1, + toBlock: 10, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return false, errors.New("query error") + }, + expectErr: true, + expectedErr: "error querying blocks 1 to 10: query error", + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + { + name: "Query function returns false to stop", + fromBlock: 1, + toBlock: 20, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + if from == 1 { + return false, nil + } + return true, nil + }, + expectErr: false, + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var calls []struct { + from uint64 + to uint64 + } + queryFunc := func(from, to uint64) (bool, error) { + calls = append(calls, struct { + from uint64 + to uint64 + }{from, to}) + return tt.queryFunc(from, to) + } + err := queryInBatches(context.Background(), tt.fromBlock, tt.toBlock, tt.batchSize, queryFunc) + if tt.expectErr { + require.Error(t, err) + require.EqualError(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.expectedCalls, calls) + }) + } +} diff --git a/rollup/l1/types.go b/rollup/l1/types.go new file mode 100644 index 000000000000..8c030815ec28 --- /dev/null +++ b/rollup/l1/types.go @@ -0,0 +1,22 @@ +package l1 + +import ( + "context" + "math/big" + + "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +type Client interface { + BlockNumber(ctx context.Context) (uint64, error) + ChainID(ctx context.Context) (*big.Int, error) + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) + SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) + TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, isPending bool, err error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +}
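
The new l1.Client interface deliberately mirrors a subset of the JSON-RPC client surface. A minimal sketch of that assumption (not part of this diff, and it presumes *ethclient.Client from this repo is the intended implementation) is a compile-time check:

package l1usage // hypothetical package, used only for these sketches

import (
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rollup/l1"
)

// Compile-time assertion that *ethclient.Client provides every method the new
// l1.Client interface requires; if the interface grows, this stops compiling.
var _ l1.Client = (*ethclient.Client)(nil)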
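
To show how the new Reader is meant to be driven end to end, here is a hedged usage sketch. It is not part of this change: the RPC URL and contract addresses are placeholders, it relies on the compile-time assumption above, and it only uses accessors that appear in this diff (TxHash on *CommitBatchEvent).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rollup/l1"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint and contract addresses; not real deployments.
	client, err := ethclient.Dial("https://l1-rpc.example.invalid")
	if err != nil {
		log.Fatal(err)
	}

	reader, err := l1.NewReader(ctx, l1.Config{
		ScrollChainAddress:    common.HexToAddress("0x0000000000000000000000000000000000000001"),
		L1MessageQueueAddress: common.HexToAddress("0x0000000000000000000000000000000000000002"),
	}, client)
	if err != nil {
		log.Fatal(err)
	}

	// Anchor the scan at the latest finalized L1 block and look back ~1000 blocks.
	latest, err := reader.GetLatestFinalizedBlockNumber()
	if err != nil {
		log.Fatal(err)
	}
	var from uint64
	if latest > 1000 {
		from = latest - 1000
	}

	// FetchRollupEventsInRange internally splits [from, latest] into
	// defaultRollupEventsFetchBlockRange-sized eth_getLogs queries.
	events, err := reader.FetchRollupEventsInRange(from, latest)
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range events {
		switch e := ev.(type) {
		case *l1.CommitBatchEvent:
			fmt.Printf("CommitBatch in L1 tx %s\n", e.TxHash().Hex())
		case *l1.FinalizeBatchEvent:
			fmt.Println("FinalizeBatch")
		default:
			fmt.Printf("other rollup event: %T\n", ev)
		}
	}
}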
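
The callback variant exists so a caller can stop a long scan early instead of materialising every event first, and FetchCommitTxData then resolves the original commit calldata for a given commit event. A hedged sketch of that flow follows; the stop-on-first-finalize policy is purely illustrative and the scanCommits helper is hypothetical.

package l1usage

import (
	"log"

	"github.com/scroll-tech/go-ethereum/rollup/l1"
)

// scanCommits decodes the calldata of every commit transaction it encounters in
// [from, to] and stops early once a finalize event is seen.
func scanCommits(reader *l1.Reader, from, to uint64) error {
	return reader.FetchRollupEventsInRangeWithCallback(from, to, func(ev l1.RollupEvent) bool {
		switch e := ev.(type) {
		case *l1.CommitBatchEvent:
			// FetchCommitTxData re-fetches the commit transaction (falling back
			// to the full block for unindexed txs) and unpacks its commitBatch /
			// commitBatchWithBlobProof arguments.
			if _, err := reader.FetchCommitTxData(e); err != nil {
				log.Printf("failed to decode commit tx %s: %v", e.TxHash().Hex(), err)
			}
			return true // keep scanning
		case *l1.FinalizeBatchEvent:
			return false // returning false makes queryInBatches stop issuing further batches
		default:
			return true
		}
	})
}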
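
The generated L1MessageQueue binding is only exercised through its QueueTransaction filterer. A hedged sketch of consuming that iterator is below; it assumes *ethclient.Client also satisfies bind.ContractFilterer, uses the same placeholder-address convention as above, and the printQueueTransactions helper is hypothetical.

package l1usage

import (
	"context"
	"fmt"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/rollup/l1"
)

// printQueueTransactions lists every L1 message enqueued in blocks [from, to].
func printQueueTransactions(ctx context.Context, client *ethclient.Client, queueAddr common.Address, from, to uint64) error {
	filterer, err := l1.NewL1MessageQueueFilterer(queueAddr, client)
	if err != nil {
		return err
	}

	// Filter without sender/target restrictions; nil rules match every address.
	iter, err := filterer.FilterQueueTransaction(&bind.FilterOpts{Start: from, End: &to, Context: ctx}, nil, nil)
	if err != nil {
		return err
	}
	defer iter.Close()

	for iter.Next() {
		ev := iter.Event
		fmt.Printf("queue index %d: %s -> %s, gas limit %s\n",
			ev.QueueIndex, ev.Sender.Hex(), ev.Target.Hex(), ev.GasLimit.String())
	}
	return iter.Error()
}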