diff --git a/op-batcher/batcher/service.go b/op-batcher/batcher/service.go
index c5a5cd0c3096..0923db861caf 100644
--- a/op-batcher/batcher/service.go
+++ b/op-batcher/batcher/service.go
@@ -205,6 +205,11 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error {
 	default:
 		return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType)
 	}
+
+	if bs.UsePlasma && bs.ChannelConfig.MaxFrameSize > plasma.MaxInputSize {
+		return fmt.Errorf("max frame size %d exceeds plasma max input size %d", bs.ChannelConfig.MaxFrameSize, plasma.MaxInputSize)
+	}
+
 	bs.ChannelConfig.MaxFrameSize-- // subtract 1 byte for version
 
 	if bs.UseBlobs && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
diff --git a/op-node/rollup/derive/plasma_data_source.go b/op-node/rollup/derive/plasma_data_source.go
index 15a7648cf1ee..ee0ac0079961 100644
--- a/op-node/rollup/derive/plasma_data_source.go
+++ b/op-node/rollup/derive/plasma_data_source.go
@@ -69,6 +69,12 @@ func (s *PlasmaDataSource) Next(ctx context.Context) (eth.Data, error) {
 		// return temporary error so we can keep retrying.
 		return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %x from da service: %w", s.comm, err))
 	}
+	// inputs are limited to a max size to ensure they can be challenged in the DA contract.
+	if len(data) > plasma.MaxInputSize {
+		s.log.Warn("input data exceeds max size", "size", len(data), "max", plasma.MaxInputSize)
+		s.comm = nil
+		return s.Next(ctx)
+	}
 	// reset the commitment so we can fetch the next one from the source at the next iteration.
 	s.comm = nil
 	return data, nil
diff --git a/op-node/rollup/derive/plasma_data_source_test.go b/op-node/rollup/derive/plasma_data_source_test.go
index 4db21f9a86eb..28bd72b6f031 100644
--- a/op-node/rollup/derive/plasma_data_source_test.go
+++ b/op-node/rollup/derive/plasma_data_source_test.go
@@ -369,3 +369,90 @@ func TestPlasmaDataSourceStall(t *testing.T) {
 
 	l1F.AssertExpectations(t)
 }
+
+func TestPlasmaDataSourceOversizedInput(t *testing.T) {
+	logger := testlog.Logger(t, log.LevelDebug)
+	ctx := context.Background()
+
+	rng := rand.New(rand.NewSource(1234))
+
+	l1F := &testutils.MockL1Source{}
+
+	storage := plasma.NewMockDAClient(logger)
+
+	pcfg := plasma.Config{
+		ChallengeWindow: 90, ResolveWindow: 90,
+	}
+
+	da := plasma.NewPlasmaDAWithStorage(logger, pcfg, storage, l1F, &plasma.NoopMetrics{})
+
+	// Create rollup genesis and config
+	l1Time := uint64(2)
+	refA := testutils.RandomBlockRef(rng)
+	refA.Number = 1
+	l1Refs := []eth.L1BlockRef{refA}
+	refA0 := eth.L2BlockRef{
+		Hash:           testutils.RandomHash(rng),
+		Number:         0,
+		ParentHash:     common.Hash{},
+		Time:           refA.Time,
+		L1Origin:       refA.ID(),
+		SequenceNumber: 0,
+	}
+	batcherPriv := testutils.RandomKey()
+	batcherAddr := crypto.PubkeyToAddress(batcherPriv.PublicKey)
+	batcherInbox := common.Address{42}
+	cfg := &rollup.Config{
+		Genesis: rollup.Genesis{
+			L1:     refA.ID(),
+			L2:     refA0.ID(),
+			L2Time: refA0.Time,
+		},
+		BlockTime:          1,
+		SeqWindowSize:      20,
+		BatchInboxAddress:  batcherInbox,
+		DAChallengeAddress: common.Address{43},
+	}
+
+	signer := cfg.L1Signer()
+
+	factory := NewDataSourceFactory(logger, cfg, l1F, nil, da)
+
+	parent := l1Refs[0]
+	// create a new mock l1 ref
+	ref := eth.L1BlockRef{
+		Hash:       testutils.RandomHash(rng),
+		Number:     parent.Number + 1,
+		ParentHash: parent.Hash,
+		Time:       parent.Time + l1Time,
+	}
+	l1F.ExpectFetchReceipts(ref.Hash, nil, types.Receipts{}, nil)
+	// mock input commitments in l1 transactions with an oversized input
+	input := testutils.RandomData(rng, plasma.MaxInputSize+1)
+	comm, _ := storage.SetInput(ctx, input)
+
+	tx, err := types.SignNewTx(batcherPriv, signer, &types.DynamicFeeTx{
+		ChainID:   signer.ChainID(),
+		Nonce:     0,
+		GasTipCap: big.NewInt(2 * params.GWei),
+		GasFeeCap: big.NewInt(30 * params.GWei),
+		Gas:       100_000,
+		To:        &batcherInbox,
+		Value:     big.NewInt(int64(0)),
+		Data:      comm,
+	})
+	require.NoError(t, err)
+
+	txs := []*types.Transaction{tx}
+
+	l1F.ExpectInfoAndTxsByHash(ref.Hash, testutils.RandomBlockInfo(rng), txs, nil)
+
+	src, err := factory.OpenData(ctx, ref, batcherAddr)
+	require.NoError(t, err)
+
+	// data is skipped so should return an EOF
+	_, err = src.Next(ctx)
+	require.ErrorIs(t, err, io.EOF)
+
+	l1F.AssertExpectations(t)
+}
diff --git a/op-plasma/params.go b/op-plasma/params.go
new file mode 100644
index 000000000000..3377d981cd00
--- /dev/null
+++ b/op-plasma/params.go
@@ -0,0 +1,6 @@
+package plasma
+
+// Max input size ensures the canonical chain cannot include input batches too large to
+// challenge in the Data Availability Challenge contract. Value in number of bytes.
+// This value can only be changed in a hard fork.
+const MaxInputSize = 130672