From 640e3e62f55431dc952da262f4bf37d65f8f79bd Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 12:07:34 -0800 Subject: [PATCH 01/40] Use 4844-only contracts branch --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 0a149d2af9..798934bc56 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 0a149d2af9aee566c4abf493479ec15e5fc32d98 +Subproject commit 798934bc5601ed9926ad9e8744575ecc075e0902 From 312efbb74cabc3ca2d36dfe307d929fbda9b24da Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 12:10:26 -0800 Subject: [PATCH 02/40] Handle MaxTimeVariation contract change --- arbnode/batch_poster.go | 12 ++++++------ arbnode/node.go | 8 ++++---- system_tests/full_challenge_impl_test.go | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index c4fc500d76..9b8089fbd6 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -921,22 +921,22 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, fmt.Errorf("error getting L1 bound block: %w", err) } - maxTimeVariation, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ + maxTimeVariationDelayBlocks, maxTimeVariationFutureBlocks, maxTimeVariationDelaySeconds, maxTimeVariationFutureSeconds, err := b.seqInbox.MaxTimeVariation(&bind.CallOpts{ Context: ctx, BlockNumber: l1Bound.Number, }) if err != nil { // This might happen if the latest finalized block is old enough that our L1 node no longer has its state log.Warn("error getting max time variation on L1 bound block; falling back on latest block", "err", err) - maxTimeVariation, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) + maxTimeVariationDelayBlocks, maxTimeVariationFutureBlocks, maxTimeVariationDelaySeconds, maxTimeVariationFutureSeconds, err = b.seqInbox.MaxTimeVariation(&bind.CallOpts{Context: ctx}) if err != nil { return false, fmt.Errorf("error getting max time variation: %w", err) } } l1BoundBlockNumber := arbutil.ParentHeaderToL1BlockNumber(l1Bound) - l1BoundMaxBlockNumber = arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariation.FutureBlocks)) - l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariation.FutureSeconds)) + l1BoundMaxBlockNumber = arbmath.SaturatingUAdd(l1BoundBlockNumber, arbmath.BigToUintSaturating(maxTimeVariationFutureBlocks)) + l1BoundMaxTimestamp = arbmath.SaturatingUAdd(l1Bound.Time, arbmath.BigToUintSaturating(maxTimeVariationFutureSeconds)) if config.L1BlockBoundBypass > 0 { latestHeader, err := b.l1Reader.LastHeader(ctx) @@ -947,8 +947,8 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) blockNumberWithPadding := arbmath.SaturatingUAdd(latestBlockNumber, uint64(config.L1BlockBoundBypass/ethPosBlockTime)) timestampWithPadding := arbmath.SaturatingUAdd(latestHeader.Time, uint64(config.L1BlockBoundBypass/time.Second)) - l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelayBlocks)) - l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, arbmath.BigToUintSaturating(maxTimeVariation.DelaySeconds)) + l1BoundMinBlockNumber = arbmath.SaturatingUSub(blockNumberWithPadding, arbmath.BigToUintSaturating(maxTimeVariationDelayBlocks)) + l1BoundMinTimestamp = arbmath.SaturatingUSub(timestampWithPadding, 
arbmath.BigToUintSaturating(maxTimeVariationDelaySeconds)) } } diff --git a/arbnode/node.go b/arbnode/node.go index f92dcefe7c..6119a4fb5e 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -65,10 +65,10 @@ func GenerateRollupConfig(prod bool, wasmModuleRoot common.Hash, rollupOwner com // TODO could the ChainConfig be just []byte? ChainConfig: string(serializedChainConfig), SequencerInboxMaxTimeVariation: rollupgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: big.NewInt(60 * 60 * 24 / 15), - FutureBlocks: big.NewInt(12), - DelaySeconds: big.NewInt(60 * 60 * 24), - FutureSeconds: big.NewInt(60 * 60), + DelayBlocks: 60 * 60 * 24 / 15, + FutureBlocks: 12, + DelaySeconds: 60 * 60 * 24, + FutureSeconds: 60 * 60, }, } } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 118d17ec81..ecbfd67c7a 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -200,10 +200,10 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: big.NewInt(10000), - FutureBlocks: big.NewInt(10000), - DelaySeconds: big.NewInt(10000), - FutureSeconds: big.NewInt(10000), + DelayBlocks: 10000, + FutureBlocks: 10000, + DelaySeconds: 10000, + FutureSeconds: 10000, } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, From b9fa60817f908b9b1692cd65ad659f0048534125 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 12:11:24 -0800 Subject: [PATCH 03/40] Handle TimeBounds move to Bridge --- arbnode/delayed_seq_reorg_test.go | 8 ++++---- arbnode/sequencer_inbox.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index a28eebb5dc..9ad984ae6c 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -61,7 +61,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{1}, AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), AfterDelayedCount: 1, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -77,7 +77,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{2}, AfterDelayedAcc: userDelayed.AfterInboxAcc(), AfterDelayedCount: 2, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -91,7 +91,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{3}, AfterDelayedAcc: userDelayed.AfterInboxAcc(), AfterDelayedCount: 2, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, @@ -130,7 +130,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { AfterInboxAcc: [32]byte{2}, AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), AfterDelayedCount: 1, - TimeBounds: bridgegen.ISequencerInboxTimeBounds{}, + TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, bridgeAddress: [20]byte{}, diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index 2adfcb60b3..d0cdebfeff 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -102,7 +102,7 @@ type SequencerInboxBatch struct 
{ AfterInboxAcc common.Hash AfterDelayedAcc common.Hash AfterDelayedCount uint64 - TimeBounds bridgegen.ISequencerInboxTimeBounds + TimeBounds bridgegen.IBridgeTimeBounds rawLog types.Log dataLocation batchDataLocation bridgeAddress common.Address From 659553f0af167228212f54367cb546ae0b3cf661 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Thu, 18 Jan 2024 15:07:13 -0800 Subject: [PATCH 04/40] Placeholder zero addresses in DeploySequencerInbox --- deploy/deploy.go | 2 +- system_tests/full_challenge_impl_test.go | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/deploy/deploy.go b/deploy/deploy.go index bd2f2ec329..b1a3523cfd 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -40,7 +40,7 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, common.Address{}, common.Address{}) // TODO upload the DataHashReader and BlobBasefeeReader err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index ecbfd67c7a..c14f4c0d51 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -199,19 +199,13 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) - timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ - DelayBlocks: 10000, - FutureBlocks: 10000, - DelaySeconds: 10000, - FutureSeconds: 10000, - } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, l1Client, - bridgeAddr, l1Info.GetAddress("sequencer"), - timeBounds, big.NewInt(117964), + common.Address{}, // TODO addresses for DataHashReader and BlobBasefeeReader + common.Address{}, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 9e39151423eec4d66b175a8a87b9920a2feefdd2 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Thu, 18 Jan 2024 20:23:08 -0700 Subject: [PATCH 05/40] Get non-challenge tests passing --- Makefile | 1 + arbnode/batch_poster.go | 8 ++--- contracts | 2 +- deploy/deploy.go | 13 +++++++- solgen/gen.go | 38 ++++++++++++++++++++++++ system_tests/common_test.go | 7 ++++- system_tests/full_challenge_impl_test.go | 4 +-- 7 files changed, 63 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 4221100961..edc6c3a6d9 100644 --- a/Makefile +++ b/Makefile @@ -331,6 +331,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro .make/solidity: $(DEP_PREDICATE) contracts/src/*/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make yarn --cwd contracts build + yarn --cwd contracts build:forge:yul @touch $@ .make/yarndeps: $(DEP_PREDICATE) contracts/package.json contracts/yarn.lock $(ORDER_ONLY_PREDICATE) .make diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 9b8089fbd6..07034ee6f8 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -338,10 +338,7 @@ func AccessList(opts *AccessListOpts) types.AccessList { StorageKeys: []common.Hash{ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), // totalDelayedMessagesRead 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), // bridge - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"), // maxTimeVariation.delayBlocks - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // maxTimeVariation.futureBlocks - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000006"), // maxTimeVariation.delaySeconds - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000007"), // maxTimeVariation.futureSeconds + common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), // maxTimeVariation // ADMIN_SLOT from OpenZeppelin, keccak-256 hash of // "eip1967.proxy.admin" subtracted by 1. common.HexToHash("0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103"), @@ -427,7 +424,8 @@ func (b *BatchPoster) checkReverts(ctx context.Context, to int64) (bool, error) if shouldHalt { logLevel = log.Error } - logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash) + txErr := arbutil.DetailTxError(ctx, b.l1Reader.Client(), tx, r) + logLevel("Transaction from batch poster reverted", "nonce", tx.Nonce(), "txHash", tx.Hash(), "blockNumber", r.BlockNumber, "blockHash", r.BlockHash, "txErr", txErr) return shouldHalt, nil } } diff --git a/contracts b/contracts index 798934bc56..b95ab08544 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 798934bc5601ed9926ad9e8744575ecc075e0902 +Subproject commit b95ab08544ae339c5ee7e7d708c9acb5e4ab1f75 diff --git a/deploy/deploy.go b/deploy/deploy.go index b1a3523cfd..94a8e81a25 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -16,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/ospgen" "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" + "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/util/headerreader" ) @@ -40,7 +41,17 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, common.Address{}, common.Address{}) // TODO upload the DataHashReader and BlobBasefeeReader + dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("data hashes reader deploy error: %w", err) + } + blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(auth, client) + err = andTxSucceeded(ctx, l1Reader, tx, err) + if err != nil { + return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) + } + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, dataHashesReader, blobBasefeeReader) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/solgen/gen.go b/solgen/gen.go index 5d43946fa5..770fa08571 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -23,6 +23,15 @@ type HardHatArtifact struct { Bytecode string `json:"bytecode"` } +type FoundryBytecode struct { + Object string `json:"object"` +} + +type FoundryArtifact struct { + Abi []interface{} `json:"abi"` + Bytecode FoundryBytecode `json:"bytecode"` 
+} + type moduleInfo struct { contractNames []string abis []string @@ -96,6 +105,35 @@ func main() { modInfo.addArtifact(artifact) } + yulFilePaths, err := filepath.Glob(filepath.Join(parent, "contracts", "out", "yul", "*", "*.json")) + if err != nil { + log.Fatal(err) + } + yulModInfo := modules["yulgen"] + if yulModInfo == nil { + yulModInfo = &moduleInfo{} + modules["yulgen"] = yulModInfo + } + for _, path := range yulFilePaths { + _, file := filepath.Split(path) + name := file[:len(file)-5] + + data, err := os.ReadFile(path) + if err != nil { + log.Fatal("could not read", path, "for contract", name, err) + } + + artifact := FoundryArtifact{} + if err := json.Unmarshal(data, &artifact); err != nil { + log.Fatal("failed to parse contract", name, err) + } + yulModInfo.addArtifact(HardHatArtifact{ + ContractName: name, + Abi: artifact.Abi, + Bytecode: artifact.Bytecode.Object, + }) + } + // add upgrade executor module which is not compiled locally, but imported from 'nitro-contracts' depedencies upgExecutorPath := filepath.Join(parent, "contracts", "node_modules", "@offchainlabs", "upgrade-executor", "build", "contracts", "src", "UpgradeExecutor.sol", "UpgradeExecutor.json") _, err = os.Stat(upgExecutorPath) diff --git a/system_tests/common_test.go b/system_tests/common_test.go index 2e17a50ede..a950ebd7ca 100644 --- a/system_tests/common_test.go +++ b/system_tests/common_test.go @@ -43,6 +43,9 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" + "github.com/ethereum/go-ethereum/eth/tracers" + _ "github.com/ethereum/go-ethereum/eth/tracers/js" + _ "github.com/ethereum/go-ethereum/eth/tracers/native" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -474,9 +477,10 @@ func createStackConfigForTest(dataDir string) *node.Config { stackConf.DataDir = dataDir stackConf.UseLightweightKDF = true stackConf.WSPort = 0 + stackConf.WSModules = append(stackConf.WSModules, "eth", "debug") stackConf.HTTPPort = 0 stackConf.HTTPHost = "" - stackConf.HTTPModules = append(stackConf.HTTPModules, "eth") + stackConf.HTTPModules = append(stackConf.HTTPModules, "eth", "debug") stackConf.P2P.NoDiscovery = true stackConf.P2P.NoDial = true stackConf.P2P.ListenAddr = "" @@ -605,6 +609,7 @@ func createTestL1BlockChainWithConfig(t *testing.T, l1info info, stackConfig *no Namespace: "eth", Service: filters.NewFilterAPI(filters.NewFilterSystem(l1backend.APIBackend, filters.Config{}), false), }}) + stack.RegisterAPIs(tracers.APIs(l1backend.APIBackend)) Require(t, stack.Start()) Require(t, l1backend.StartMining()) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index c14f4c0d51..1a7747092e 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -204,8 +204,8 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Client, l1Info.GetAddress("sequencer"), big.NewInt(117964), - common.Address{}, // TODO addresses for DataHashReader and BlobBasefeeReader - common.Address{}, + common.Address{1}, // TODO addresses for DataHashReader and BlobBasefeeReader + common.Address{1}, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 5e2dfea42614e7bdfa500ce21a7ec8ba29a5107a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 19 Jan 2024 01:20:54 -0700 Subject: [PATCH 06/40] Use new SequencerInboxStub to fix challenge 
tests --- contracts | 2 +- system_tests/full_challenge_impl_test.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/contracts b/contracts index b95ab08544..77ce30ee83 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit b95ab08544ae339c5ee7e7d708c9acb5e4ab1f75 +Subproject commit 77ce30ee8393a7b489e42f7afdbe6f3966538e72 diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 1a7747092e..a2668d69e5 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -199,10 +199,18 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) + timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ + DelayBlocks: 10000, + FutureBlocks: 10000, + DelaySeconds: 10000, + FutureSeconds: 10000, + } seqInboxAddr, tx, seqInbox, err := mocksgen.DeploySequencerInboxStub( &txOpts, l1Client, + bridgeAddr, l1Info.GetAddress("sequencer"), + timeBounds, big.NewInt(117964), common.Address{1}, // TODO addresses for DataHashReader and BlobBasefeeReader common.Address{1}, From 750a15f5f4505ef484b0539fdc86a292f144e3b3 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 19 Jan 2024 01:24:45 -0700 Subject: [PATCH 07/40] Use real blob data readers in challenge tests --- system_tests/full_challenge_impl_test.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index a2668d69e5..8dc9b83f32 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -35,6 +35,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/solgen/go/mocksgen" "github.com/offchainlabs/nitro/solgen/go/ospgen" + "github.com/offchainlabs/nitro/solgen/go/yulgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" "github.com/offchainlabs/nitro/validator/server_common" @@ -199,6 +200,14 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) + dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(&txOpts, l1Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Client, tx) + Require(t, err) + blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(&txOpts, l1Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Client, tx) + Require(t, err) timeBounds := mocksgen.ISequencerInboxMaxTimeVariation{ DelayBlocks: 10000, FutureBlocks: 10000, @@ -212,8 +221,8 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Info.GetAddress("sequencer"), timeBounds, big.NewInt(117964), - common.Address{1}, // TODO addresses for DataHashReader and BlobBasefeeReader - common.Address{1}, + dataHashesReader, + blobBasefeeReader, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 3619cd1a097e9f62de69a6acd970c38034a69120 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Fri, 19 Jan 2024 10:39:18 -0700 Subject: [PATCH 08/40] Update contracts to latest (unified 4844 reader contract) --- contracts | 2 +- deploy/deploy.go | 9 ++------- system_tests/full_challenge_impl_test.go | 9 ++------- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/contracts b/contracts index 77ce30ee83..a8e7709bfc 160000 --- a/contracts +++ b/contracts 
@@ -1 +1 @@ -Subproject commit 77ce30ee8393a7b489e42f7afdbe6f3966538e72 +Subproject commit a8e7709bfc918f9b8e2888d47f2fd8454779fd11 diff --git a/deploy/deploy.go b/deploy/deploy.go index 94a8e81a25..59760e2c21 100644 --- a/deploy/deploy.go +++ b/deploy/deploy.go @@ -41,17 +41,12 @@ func deployBridgeCreator(ctx context.Context, l1Reader *headerreader.HeaderReade return common.Address{}, fmt.Errorf("bridge deploy error: %w", err) } - dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(auth, client) - err = andTxSucceeded(ctx, l1Reader, tx, err) - if err != nil { - return common.Address{}, fmt.Errorf("data hashes reader deploy error: %w", err) - } - blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(auth, client) + reader4844, tx, _, err := yulgen.DeployReader4844(auth, client) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("blob basefee reader deploy error: %w", err) } - seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, dataHashesReader, blobBasefeeReader) + seqInboxTemplate, tx, _, err := bridgegen.DeploySequencerInbox(auth, client, maxDataSize, reader4844) err = andTxSucceeded(ctx, l1Reader, tx, err) if err != nil { return common.Address{}, fmt.Errorf("sequencer inbox deploy error: %w", err) diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 8dc9b83f32..0fa483b6ea 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -200,11 +200,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) - dataHashesReader, tx, _, err := yulgen.DeployDataHashesReader(&txOpts, l1Client) - Require(t, err) - _, err = EnsureTxSucceeded(ctx, l1Client, tx) - Require(t, err) - blobBasefeeReader, tx, _, err := yulgen.DeployBlobBasefeeReader(&txOpts, l1Client) + reader4844, tx, _, err := yulgen.DeployReader4844(&txOpts, l1Client) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) Require(t, err) @@ -221,8 +217,7 @@ func setupSequencerInboxStub(ctx context.Context, t *testing.T, l1Info *Blockcha l1Info.GetAddress("sequencer"), timeBounds, big.NewInt(117964), - dataHashesReader, - blobBasefeeReader, + reader4844, ) Require(t, err) _, err = EnsureTxSucceeded(ctx, l1Client, tx) From 574fb738c71290d3055df78ae01f80d7e57cccf2 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Jan 2024 08:54:16 -0800 Subject: [PATCH 09/40] go mod tidy after merging in geth-1.13 branch --- go.mod | 16 ++++++++-------- go.sum | 38 ++++++++++++++++++++------------------ 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index bdda6a61a1..f6f6bce80d 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 @@ -53,6 +53,7 @@ require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/DataDog/zstd v1.5.2 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/alecthomas/units 
v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect @@ -97,7 +98,7 @@ require ( github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -131,7 +132,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect - github.com/huin/goupnp v1.1.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.1.1 // indirect @@ -307,13 +308,13 @@ require ( ) require ( - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/VictoriaMetrics/fastcache v1.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect - github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-redis/redis/v8 v8.11.4 github.com/go-stack/stack v1.8.1 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect @@ -333,8 +334,7 @@ require ( github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff --git a/go.sum b/go.sum index bf8b4b826d..a0b83650a2 100644 --- a/go.sum +++ b/go.sum @@ -55,6 +55,8 @@ github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKz github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= @@ -63,8 +65,8 @@ github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fT github.com/Shopify/toxiproxy v2.1.4+incompatible 
h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= @@ -223,15 +225,15 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -305,8 +307,8 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= -github.com/dop251/goja 
v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -401,8 +403,8 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -638,8 +640,8 @@ github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZm github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= -github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -1598,10 +1600,10 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= -github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/go-sysconf v0.3.12 
h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= @@ -1948,6 +1950,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1987,7 +1990,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2009,6 +2011,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2249,8 +2253,6 @@ gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= 
-gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= -gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= From fe654290f345218a632139787f63cc5e39275c92 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Mon, 22 Jan 2024 17:46:13 -0800 Subject: [PATCH 10/40] Port BlobClient from old 4844 branch This ports BlobClient from the eip-4844-experimental branch, with the prysm dependency removed (relevant code copied to util/beaconclient) and the kZGToVersionedHash function copied from geth rather than modifying our fork to make it public as I had done before, since it is so simple. "A little copying is better than a little dependency." - Rob Pike, Go Proverbs --- arbnode/blob_reader.go | 185 +++++++++++++++++++++++++++++++++++ go.mod | 2 +- util/beaconclient/client.go | 98 +++++++++++++++++++ util/beaconclient/errors.go | 40 ++++++++ util/beaconclient/options.go | 48 +++++++++ 5 files changed, 372 insertions(+), 1 deletion(-) create mode 100644 arbnode/blob_reader.go create mode 100644 util/beaconclient/client.go create mode 100644 util/beaconclient/errors.go create mode 100644 util/beaconclient/options.go diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go new file mode 100644 index 0000000000..673df37b1f --- /dev/null +++ b/arbnode/blob_reader.go @@ -0,0 +1,185 @@ +// Copyright 2023, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package arbnode + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/util/beaconclient" + "github.com/offchainlabs/nitro/util/pretty" + "github.com/pkg/errors" + + "github.com/spf13/pflag" +) + +type BlobClient struct { + bc *beaconclient.Client + ec arbutil.L1Interface + + // The genesis time time won't change so only request it once. + cachedGenesisTime uint64 +} + +type BlobClientConfig struct { + BeaconChainUrl string `koanf:"beacon-chain-url"` +} + +var DefaultBlobClientConfig = BlobClientConfig{ + BeaconChainUrl: "", +} + +func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { + f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") +} + +func NewBlobClient(bc *beaconclient.Client, ec arbutil.L1Interface) *BlobClient { + return &BlobClient{bc: bc, ec: ec} +} + +// Get all the blobs associated with a particular block. 
+func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { + header, err := b.ec.HeaderByHash(ctx, blockHash) + if err != nil { + return nil, err + } + + genesisTime, err := b.genesisTime(ctx) + if err != nil { + return nil, err + } + + // TODO make denominator configurable for devnets with faster block time + slot := (header.Time - genesisTime) / 12 + + return b.blobSidecars(ctx, slot, versionedHashes) +} + +type blobResponse struct { + Data []blobResponseItem `json:"data"` +} +type blobResponseItem struct { + BlockRoot string `json:"block_root"` + Index int `json:"index"` + Slot uint64 `json:"slot"` + BlockParentRoot string `json:"block_parent_root"` + ProposerIndex uint64 `json:"proposer_index"` + Blob string `json:"blob"` + KzgCommitment string `json:"kzg_commitment"` + KzgProof string `json:"kzg_proof"` +} + +func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { + body, err := b.bc.Get(ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + if err != nil { + return nil, errors.Wrap(err, "error calling beacon client in blobSidecars") + } + + br := &blobResponse{} + err = json.Unmarshal(body, br) + if err != nil { + return nil, errors.Wrap(err, "error decoding json response in blobSidecars") + } + + if len(br.Data) == 0 { + return nil, fmt.Errorf("no blobs found for slot %d", slot) + } + + blobs := make([]kzg4844.Blob, len(versionedHashes)) + var totalFound int + + for i := range blobs { + commitmentBytes, err := hexutil.Decode(br.Data[i].KzgCommitment) + if err != nil { + return nil, fmt.Errorf("couldn't decode commitment for slot(%d) at index(%d), commitment(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgCommitment)) + } + var commitment kzg4844.Commitment + copy(commitment[:], commitmentBytes) + versionedHash := kZGToVersionedHash(commitment) + + // The versioned hashes of the blob commitments are produced in the by HASH_OPCODE_BYTE, + // presumably in the order they were added to the tx. The spec is unclear if the blobs + // need to be returned in any particular order from the beacon API, so we put them back in + // the order from the tx. 
+ var j int + var found bool + for j = range versionedHashes { + if versionedHashes[j] == versionedHash { + found = true + totalFound++ + break + } + } + if !found { + continue + } + + blob, err := hexutil.Decode(br.Data[i].Blob) + if err != nil { + return nil, fmt.Errorf("couldn't decode blob for slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) + } + copy(blobs[j][:], blob) + + proofBytes, err := hexutil.Decode(br.Data[i].KzgProof) + if err != nil { + return nil, fmt.Errorf("couldn't decode proof for slot(%d) at index(%d), proof(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgProof)) + } + var proof kzg4844.Proof + copy(proof[:], proofBytes) + + err = kzg4844.VerifyBlobProof(blobs[j], commitment, proof) + if err != nil { + return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) + } + } + + if totalFound < len(versionedHashes) { + return nil, fmt.Errorf("not all of the requested blobs (%d/%d) were found at slot (%d), can't reconstruct batch payload", totalFound, len(versionedHashes), slot) + } + + return blobs, nil +} + +type genesisResponse struct { + GenesisTime uint64 `json:"genesis_time"` + // don't currently care about other fields, add if needed +} + +func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { + if b.cachedGenesisTime > 0 { + return b.cachedGenesisTime, nil + } + + body, err := b.bc.Get(ctx, "/eth/v1/beacon/genesis") + if err != nil { + return 0, errors.Wrap(err, "error calling beacon client in genesisTime") + } + + gr := &genesisResponse{} + dataWrapper := &struct{ Data *genesisResponse }{Data: gr} + err = json.Unmarshal(body, dataWrapper) + if err != nil { + return 0, errors.Wrap(err, "error decoding json response in genesisTime") + } + + return gr.GenesisTime, nil +} + +// The following code is taken from core/vm/contracts.go +const ( + blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile. +) + +func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { + h := sha256.Sum256(kzg[:]) + h[0] = blobCommitmentVersionKZG + + return h +} diff --git a/go.mod b/go.mod index f6f6bce80d..69cbcd9884 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.9.0 github.com/multiformats/go-multihash v0.2.1 + github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 @@ -233,7 +234,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect diff --git a/util/beaconclient/client.go b/util/beaconclient/client.go new file mode 100644 index 0000000000..e2dfd8e6bf --- /dev/null +++ b/util/beaconclient/client.go @@ -0,0 +1,98 @@ +package beaconclient + +import ( + "context" + "io" + "net" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +// Client is a wrapper object around the HTTP client. +// Taken from prysm/api/client. 
+type Client struct { + hc *http.Client + baseURL *url.URL + token string +} + +// NewClient constructs a new client with the provided options (ex WithTimeout). +// `host` is the base host + port used to construct request urls. This value can be +// a URL string, or NewClient will assume an http endpoint if just `host:port` is used. +func NewClient(host string, opts ...ClientOpt) (*Client, error) { + u, err := urlForHost(host) + if err != nil { + return nil, err + } + c := &Client{ + hc: &http.Client{}, + baseURL: u, + } + for _, o := range opts { + o(c) + } + return c, nil +} + +// Token returns the bearer token used for jwt authentication +func (c *Client) Token() string { + return c.token +} + +// BaseURL returns the base url of the client +func (c *Client) BaseURL() *url.URL { + return c.baseURL +} + +// Do execute the request against the http client +func (c *Client) Do(req *http.Request) (*http.Response, error) { + return c.hc.Do(req) +} + +func urlForHost(h string) (*url.URL, error) { + // try to parse as url (being permissive) + u, err := url.Parse(h) + if err == nil && u.Host != "" { + return u, nil + } + // try to parse as host:port + host, port, err := net.SplitHostPort(h) + if err != nil { + return nil, ErrMalformedHostname + } + return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil +} + +// NodeURL returns a human-readable string representation of the beacon node base url. +func (c *Client) NodeURL() string { + return c.baseURL.String() +} + +// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package. +func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) { + u := c.baseURL.ResolveReference(&url.URL{Path: path}) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + for _, o := range opts { + o(req) + } + r, err := c.hc.Do(req) + if err != nil { + return nil, err + } + defer func() { + err = r.Body.Close() + }() + if r.StatusCode != http.StatusOK { + return nil, Non200Err(r) + } + b, err := io.ReadAll(r.Body) + if err != nil { + return nil, errors.Wrap(err, "error reading http response body") + } + return b, nil +} diff --git a/util/beaconclient/errors.go b/util/beaconclient/errors.go new file mode 100644 index 0000000000..7ee88805cd --- /dev/null +++ b/util/beaconclient/errors.go @@ -0,0 +1,40 @@ +package beaconclient + +import ( + "fmt" + "io" + "net/http" + + "github.com/pkg/errors" +) + +// ErrMalformedHostname is used to indicate if a host name's format is incorrect. +var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500") + +// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code. +// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK. +var ErrNotOK = errors.New("did not receive 2xx response from API") + +// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API. +var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API") + +// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized. +var ErrInvalidNodeVersion = errors.New("invalid node version response") + +// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error. 
+func Non200Err(response *http.Response) error { + bodyBytes, err := io.ReadAll(response.Body) + var body string + if err != nil { + body = "(Unable to read response body.)" + } else { + body = "response body:\n" + string(bodyBytes) + } + msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body) + switch response.StatusCode { + case 404: + return errors.Wrap(ErrNotFound, msg) + default: + return errors.Wrap(ErrNotOK, msg) + } +} diff --git a/util/beaconclient/options.go b/util/beaconclient/options.go new file mode 100644 index 0000000000..98a37e17a0 --- /dev/null +++ b/util/beaconclient/options.go @@ -0,0 +1,48 @@ +package beaconclient + +import ( + "fmt" + "net/http" + "time" +) + +// ReqOption is a request functional option. +type ReqOption func(*http.Request) + +// WithSSZEncoding is a request functional option that adds SSZ encoding header. +func WithSSZEncoding() ReqOption { + return func(req *http.Request) { + req.Header.Set("Accept", "application/octet-stream") + } +} + +// WithAuthorizationToken is a request functional option that adds header for authorization token. +func WithAuthorizationToken(token string) ReqOption { + return func(req *http.Request) { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + } +} + +// ClientOpt is a functional option for the Client type (http.Client wrapper) +type ClientOpt func(*Client) + +// WithTimeout sets the .Timeout attribute of the wrapped http.Client. +func WithTimeout(timeout time.Duration) ClientOpt { + return func(c *Client) { + c.hc.Timeout = timeout + } +} + +// WithRoundTripper replaces the underlying HTTP's transport with a custom one. +func WithRoundTripper(t http.RoundTripper) ClientOpt { + return func(c *Client) { + c.hc.Transport = t + } +} + +// WithAuthenticationToken sets an oauth token to be used. 
+func WithAuthenticationToken(token string) ClientOpt { + return func(c *Client) { + c.token = token + } +} From 11e61aafd6b41623e5989cd64dd46ad97cfc1a9a Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:07:46 -0700 Subject: [PATCH 11/40] Implement 4844 blob reading in replay binary --- arbnode/delayed_seq_reorg_test.go | 2 +- arbnode/inbox_tracker.go | 14 ++++-- arbnode/node.go | 16 +++++- arbstate/das_reader.go | 7 +++ arbstate/inbox.go | 49 ++++++++++++++++-- arbstate/inbox_fuzz_test.go | 9 ++-- cmd/pruning/pruning.go | 2 +- cmd/replay/main.go | 32 ++++++++++-- staker/stateless_block_validator.go | 4 ++ system_tests/state_fuzz_test.go | 8 +-- util/blobs/blobs.go | 77 +++++++++++++++++++++++++++++ 11 files changed, 196 insertions(+), 24 deletions(-) create mode 100644 util/blobs/blobs.go diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index 9ad984ae6c..beb2656e2b 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -19,7 +19,7 @@ func TestSequencerReorgFromDelayed(t *testing.T) { defer cancel() exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) - tracker, err := NewInboxTracker(db, streamer, nil) + tracker, err := NewInboxTracker(db, streamer, nil, nil) Require(t, err) err = streamer.Start(ctx) diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 51f74cbeb4..eaf863bffc 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -38,12 +38,13 @@ type InboxTracker struct { mutex sync.Mutex validator *staker.BlockValidator das arbstate.DataAvailabilityReader + blobReader arbstate.BlobReader batchMetaMutex sync.Mutex batchMeta *containers.LruCache[uint64, BatchMetadata] } -func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader) (*InboxTracker, error) { +func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arbstate.DataAvailabilityReader, blobReader arbstate.BlobReader) (*InboxTracker, error) { // We support a nil txStreamer for the pruning code if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && das == nil { return nil, errors.New("data availability service required but unconfigured") @@ -52,6 +53,7 @@ func NewInboxTracker(db ethdb.Database, txStreamer *TransactionStreamer, das arb db: db, txStreamer: txStreamer, das: das, + blobReader: blobReader, batchMeta: containers.NewLruCache[uint64, BatchMetadata](1000), } return tracker, nil @@ -504,11 +506,12 @@ type multiplexerBackend struct { inbox *InboxTracker } -func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, error) { +func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if len(b.batches) == 0 { - return nil, errors.New("read past end of specified sequencer batches") + return nil, common.Hash{}, errors.New("read past end of specified sequencer batches") } - return b.batches[0].Serialize(b.ctx, b.client) + bytes, err := b.batches[0].Serialize(b.ctx, b.client) + return bytes, b.batches[0].BlockHash, err } func (b *multiplexerBackend) GetSequencerInboxPosition() uint64 { @@ -603,7 +606,8 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client arbutil.L ctx: ctx, client: client, } - multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, arbstate.KeysetValidate) + + multiplexer := arbstate.NewInboxMultiplexer(backend, prevbatchmeta.DelayedMessageCount, t.das, t.blobReader, 
arbstate.KeysetValidate) batchMessageCounts := make(map[uint64]arbutil.MessageIndex) currentpos := prevbatchmeta.MessageCount + 1 for { diff --git a/arbnode/node.go b/arbnode/node.go index 6119a4fb5e..99ecb541ee 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -26,6 +26,7 @@ import ( "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbnode/resourcemanager" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" "github.com/offchainlabs/nitro/broadcastclients" @@ -39,6 +40,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" + "github.com/offchainlabs/nitro/util/beaconclient" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" @@ -85,6 +87,7 @@ type Config struct { Staker staker.L1ValidatorConfig `koanf:"staker" reload:"hot"` SeqCoordinator SeqCoordinatorConfig `koanf:"seq-coordinator"` DataAvailability das.DataAvailabilityConfig `koanf:"data-availability"` + BlobClient BlobClientConfig `koanf:"blob-client"` SyncMonitor SyncMonitorConfig `koanf:"sync-monitor"` Dangerous DangerousConfig `koanf:"dangerous"` TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` @@ -142,6 +145,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed staker.L1ValidatorConfigAddOptions(prefix+".staker", f) SeqCoordinatorConfigAddOptions(prefix+".seq-coordinator", f) das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) + BlobClientAddOptions(prefix+".blob-client", f) SyncMonitorConfigAddOptions(prefix+".sync-monitor", f) DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) @@ -512,7 +516,17 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } - inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader) + var blobReader arbstate.BlobReader + if config.BlobClient.BeaconChainUrl != "" { + bc, err := beaconclient.NewClient(config.BlobClient.BeaconChainUrl) + if err != nil { + return nil, err + } + + blobReader = NewBlobClient(bc, l1client) + } + + inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) if err != nil { return nil, err } diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index a6d351b49e..5f568605b1 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -40,6 +40,9 @@ const L1AuthenticatedMessageHeaderFlag byte = 0x40 // ZeroheavyMessageHeaderFlag indicates that this message is zeroheavy-encoded. const ZeroheavyMessageHeaderFlag byte = 0x20 +// BlobHashesHeaderFlag indicates that this message contains EIP 4844 versioned hashes of the committments calculated over the blob data for the batch data. +const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x50 + // BrotliMessageHeaderByte indicates that the message is brotli-compressed. 
const BrotliMessageHeaderByte byte = 0 @@ -55,6 +58,10 @@ func IsZeroheavyEncodedHeaderByte(header byte) bool { return (ZeroheavyMessageHeaderFlag & header) > 0 } +func IsBlobHashesHeaderByte(header byte) bool { + return (BlobHashesHeaderFlag & header) > 0 +} + func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 3995bcf308..cf8f61e97a 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -8,11 +8,13 @@ import ( "context" "encoding/binary" "errors" + "fmt" "io" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -21,11 +23,12 @@ import ( "github.com/offchainlabs/nitro/arbos/l1pricing" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/das/dastree" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/zeroheavy" ) type InboxBackend interface { - PeekSequencerInbox() ([]byte, error) + PeekSequencerInbox() ([]byte, common.Hash, error) GetSequencerInboxPosition() uint64 AdvanceSequencerInbox() @@ -36,6 +39,14 @@ type InboxBackend interface { ReadDelayedInbox(seqNum uint64) (*arbostypes.L1IncomingMessage, error) } +type BlobReader interface { + GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, + ) ([]kzg4844.Blob, error) +} + type sequencerMessage struct { minTimestamp uint64 maxTimestamp uint64 @@ -50,7 +61,7 @@ const maxZeroheavyDecompressedLen = 101*MaxDecompressedLen/100 + 64 const MaxSegmentsPerSequencerMessage = 100 * 1024 const MinLifetimeSecondsForDataAvailabilityCert = 7 * 24 * 60 * 60 // one week -func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { +func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash common.Hash, data []byte, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) (*sequencerMessage, error) { if len(data) < 40 { return nil, errors.New("sequencer message missing L1 header") } @@ -79,6 +90,31 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, data []byte, da } } + if len(payload) > 0 && IsBlobHashesHeaderByte(payload[0]) { + blobHashes := payload[1:] + if len(blobHashes)%len(common.Hash{}) != 0 { + return nil, fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(blobHashes)/len(common.Hash{})) + for i := 0; i*32 < len(blobHashes); i += 1 { + copy(versionedHashes[i][:], blobHashes[i*32:(i+1)*32]) + } + + if blobReader == nil { + return nil, errors.New("blob batch payload was encountered but no BlobReader was configured") + } + + kzgBlobs, err := blobReader.GetBlobs(ctx, batchBlockHash, versionedHashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs: %w", err) + } + payload, err = blobs.DecodeBlobs(kzgBlobs) + if err != nil { + log.Warn("Failed to decode blobs", "batchBlockHash", batchBlockHash, "versionedHashes", versionedHashes, "err", err) + return parsedMsg, nil + } + } + if len(payload) > 0 && IsZeroheavyEncodedHeaderByte(payload[0]) { pl, err := io.ReadAll(io.LimitReader(zeroheavy.NewZeroheavyDecoder(bytes.NewReader(payload[1:])), int64(maxZeroheavyDecompressedLen))) if err != nil { @@ -242,6 +278,7 @@ type inboxMultiplexer 
struct { backend InboxBackend delayedMessagesRead uint64 dasReader DataAvailabilityReader + blobReader BlobReader cachedSequencerMessage *sequencerMessage cachedSequencerMessageNum uint64 cachedSegmentNum uint64 @@ -251,11 +288,12 @@ type inboxMultiplexer struct { keysetValidationMode KeysetValidationMode } -func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { +func NewInboxMultiplexer(backend InboxBackend, delayedMessagesRead uint64, dasReader DataAvailabilityReader, blobReader BlobReader, keysetValidationMode KeysetValidationMode) arbostypes.InboxMultiplexer { return &inboxMultiplexer{ backend: backend, delayedMessagesRead: delayedMessagesRead, dasReader: dasReader, + blobReader: blobReader, keysetValidationMode: keysetValidationMode, } } @@ -270,13 +308,14 @@ const BatchSegmentKindAdvanceL1BlockNumber uint8 = 4 // Note: this does *not* return parse errors, those are transformed into invalid messages func (r *inboxMultiplexer) Pop(ctx context.Context) (*arbostypes.MessageWithMetadata, error) { if r.cachedSequencerMessage == nil { - bytes, realErr := r.backend.PeekSequencerInbox() + // Note: batchBlockHash will be zero in the replay binary, but that's fine + bytes, batchBlockHash, realErr := r.backend.PeekSequencerInbox() if realErr != nil { return nil, realErr } r.cachedSequencerMessageNum = r.backend.GetSequencerInboxPosition() var err error - r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, bytes, r.dasReader, r.keysetValidationMode) + r.cachedSequencerMessage, err = parseSequencerMessage(ctx, r.cachedSequencerMessageNum, batchBlockHash, bytes, r.dasReader, r.blobReader, r.keysetValidationMode) if err != nil { return nil, err } diff --git a/arbstate/inbox_fuzz_test.go b/arbstate/inbox_fuzz_test.go index fcb80cbd73..dcf43fd0da 100644 --- a/arbstate/inbox_fuzz_test.go +++ b/arbstate/inbox_fuzz_test.go @@ -9,6 +9,7 @@ import ( "errors" "testing" + "github.com/ethereum/go-ethereum/common" "github.com/offchainlabs/nitro/arbos/arbostypes" ) @@ -19,11 +20,11 @@ type multiplexerBackend struct { positionWithinMessage uint64 } -func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, error) { +func (b *multiplexerBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if b.batchSeqNum != 0 { - return nil, errors.New("reading unknown sequencer batch") + return nil, common.Hash{}, errors.New("reading unknown sequencer batch") } - return b.batch, nil + return b.batch, common.Hash{}, nil } func (b *multiplexerBackend) GetSequencerInboxPosition() uint64 { @@ -66,7 +67,7 @@ func FuzzInboxMultiplexer(f *testing.F) { delayedMessage: delayedMsg, positionWithinMessage: 0, } - multiplexer := NewInboxMultiplexer(backend, 0, nil, KeysetValidate) + multiplexer := NewInboxMultiplexer(backend, 0, nil, nil, KeysetValidate) _, err := multiplexer.Pop(context.TODO()) if err != nil { panic(err) diff --git a/cmd/pruning/pruning.go b/cmd/pruning/pruning.go index 68d89302f0..da015ac52c 100644 --- a/cmd/pruning/pruning.go +++ b/cmd/pruning/pruning.go @@ -189,7 +189,7 @@ func findImportantRoots(ctx context.Context, chainDb ethdb.Database, stack *node return nil, fmt.Errorf("failed to get finalized block: %w", err) } l1BlockNum := l1Block.NumberU64() - tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil) + tracker, err := arbnode.NewInboxTracker(arbDb, nil, nil, nil) if err != nil { return nil, err } diff --git a/cmd/replay/main.go 
b/cmd/replay/main.go index 2fb13ceed8..b634a2d5bb 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -62,11 +63,12 @@ func (c WavmChainContext) GetHeader(hash common.Hash, num uint64) *types.Header type WavmInbox struct{} -func (i WavmInbox) PeekSequencerInbox() ([]byte, error) { +func (i WavmInbox) PeekSequencerInbox() ([]byte, common.Hash, error) { pos := wavmio.GetInboxPosition() res := wavmio.ReadInboxMessage(pos) log.Info("PeekSequencerInbox", "pos", pos, "res[:8]", res[:8]) - return res, nil + // Our BlobPreimageReader doesn't need the block hash + return res, common.Hash{}, nil } func (i WavmInbox) GetSequencerInboxPosition() uint64 { @@ -117,6 +119,30 @@ func (dasReader *PreimageDASReader) ExpirationPolicy(ctx context.Context) (arbst return arbstate.DiscardImmediately, nil } +type BlobPreimageReader struct { +} + +func (r *BlobPreimageReader) GetBlobs( + ctx context.Context, + batchBlockHash common.Hash, + versionedHashes []common.Hash, +) ([]kzg4844.Blob, error) { + var blobs []kzg4844.Blob + for _, h := range versionedHashes { + var blob kzg4844.Blob + var preimage []byte + if true { + panic("TODO: fill in preimage with wavmio.ResolvePreimage(h, wavmio.PreimageTypeEthVersionedHash) once KZG proof support is merged into this branch") + } + if len(preimage) != len(blob) { + return nil, fmt.Errorf("for blob %v got back preimage of length %v but expected blob length %v", h, len(preimage), len(blob)) + } + copy(blob[:], preimage) + blobs = append(blobs, blob) + } + return blobs, nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -180,7 +206,7 @@ func main() { if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = arbstate.KeysetDontValidate } - inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, keysetValidationMode) + inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dasReader, &BlobPreimageReader{}, keysetValidationMode) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) if err != nil { diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index acd86f8627..d35304be27 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -284,6 +284,10 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * if len(batch.Data) <= 40 { continue } + if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { + // TODO: fetch blob preimages + panic("TODO: fetch blob preimages") + } if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { continue } diff --git a/system_tests/state_fuzz_test.go b/system_tests/state_fuzz_test.go index b14215fbf0..28bcbec9b4 100644 --- a/system_tests/state_fuzz_test.go +++ b/system_tests/state_fuzz_test.go @@ -41,7 +41,7 @@ func BuildBlock( if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, arbstate.KeysetValidate) + inboxMultiplexer := arbstate.NewInboxMultiplexer(inbox, delayedMessagesRead, nil, nil, 
arbstate.KeysetValidate) ctx := context.Background() message, err := inboxMultiplexer.Pop(ctx) @@ -69,11 +69,11 @@ type inboxBackend struct { delayedMessages [][]byte } -func (b *inboxBackend) PeekSequencerInbox() ([]byte, error) { +func (b *inboxBackend) PeekSequencerInbox() ([]byte, common.Hash, error) { if len(b.batches) == 0 { - return nil, errors.New("read past end of specified sequencer batches") + return nil, common.Hash{}, errors.New("read past end of specified sequencer batches") } - return b.batches[0], nil + return b.batches[0], common.Hash{}, nil } func (b *inboxBackend) GetSequencerInboxPosition() uint64 { diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go new file mode 100644 index 0000000000..c8025dc253 --- /dev/null +++ b/util/blobs/blobs.go @@ -0,0 +1,77 @@ +// Copyright 2023-2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package blobs + +import ( + "crypto/sha256" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +// EncodeBlobs takes in raw bytes data to convert into blobs used for KZG commitment EIP-4844 +// transactions on Ethereum. +func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) { + data, err := rlp.EncodeToBytes(data) + if err != nil { + return nil, err + } + blobs := []kzg4844.Blob{{}} + blobIndex := 0 + fieldIndex := -1 + for i := 0; i < len(data); i += 31 { + fieldIndex++ + if fieldIndex == params.BlobTxFieldElementsPerBlob { + blobs = append(blobs, kzg4844.Blob{}) + blobIndex++ + fieldIndex = 0 + } + max := i + 31 + if max > len(data) { + max = len(data) + } + copy(blobs[blobIndex][fieldIndex*32+1:], data[i:max]) + } + return blobs, nil +} + +// DecodeBlobs decodes blobs into the batch data encoded in them. +func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { + var rlpData []byte + for _, blob := range blobs { + for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ { + rlpData = append(rlpData, blob[fieldIndex*32+1:(fieldIndex+1)*32]...) + } + } + var outputData []byte + err := rlp.DecodeBytes(rlpData, &outputData) + return outputData, err +} + +// Return KZG commitments, proofs, and versioned hashes that corresponds to these blobs +func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) { + commitments := make([]kzg4844.Commitment, len(blobs)) + proofs := make([]kzg4844.Proof, len(blobs)) + versionedHashes := make([]common.Hash, len(blobs)) + + for i := range blobs { + var err error + commitments[i], err = kzg4844.BlobToCommitment(blobs[i]) + if err != nil { + return nil, nil, nil, err + } + proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) + if err != nil { + return nil, nil, nil, err + } + // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. 
+ hash := sha256.Sum256(commitments[i][:]) + hash[0] = 1 + versionedHashes[i] = hash + } + + return commitments, proofs, versionedHashes, nil +} From 511480e86ba87752b7d06977f6fafee447e9f5e0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:14:21 -0700 Subject: [PATCH 12/40] Add foundry to CI --- .github/workflows/arbitrator-ci.yml | 5 +++++ .github/workflows/ci.yml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 8c491a421c..96080831c4 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -96,6 +96,11 @@ jobs: make -j make install + - name: Install foundry + run: | + curl -L https://foundry.paradigm.xyz | bash + foundryup + - name: Cache cbrotli uses: actions/cache@v3 id: cache-cbrotli diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f2c4fac84c..7ebc3b8734 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,6 +58,11 @@ jobs: with: targets: 'wasm32-unknown-unknown, wasm32-wasi' + - name: Install foundry + run: | + curl -L https://foundry.paradigm.xyz | bash + foundryup + - name: Cache Build Products uses: actions/cache@v3 with: From 5377d0f8e527240bd24b567c106945c163502b34 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:21:04 -0700 Subject: [PATCH 13/40] Install foundry in docker and fix CI --- .github/workflows/arbitrator-ci.yml | 6 ++---- .github/workflows/ci.yml | 6 ++---- Dockerfile | 5 +++-- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 96080831c4..571f3ca983 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -96,10 +96,8 @@ jobs: make -j make install - - name: Install foundry - run: | - curl -L https://foundry.paradigm.xyz | bash - foundryup + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 - name: Cache cbrotli uses: actions/cache@v3 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7ebc3b8734..fbf00bcb2d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,10 +58,8 @@ jobs: with: targets: 'wasm32-unknown-unknown, wasm32-wasi' - - name: Install foundry - run: | - curl -L https://foundry.paradigm.xyz | bash - foundryup + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 - name: Cache Build Products uses: actions/cache@v3 diff --git a/Dockerfile b/Dockerfile index b62e569259..ea32941903 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,13 +26,14 @@ COPY --from=brotli-library-builder /workspace/install/ / FROM node:16-bullseye-slim as contracts-builder RUN apt-get update && \ - apt-get install -y git python3 make g++ + apt-get install -y git python3 make g++ curl +RUN curl -L https://foundry.paradigm.xyz | bash && . ~/.bashrc && ~/.foundry/bin/foundryup WORKDIR /workspace COPY contracts/package.json contracts/yarn.lock contracts/ RUN cd contracts && yarn install COPY contracts contracts/ COPY Makefile . -RUN NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity +RUN . 
~/.bashrc && NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-solidity FROM debian:bullseye-20211220 as wasm-base WORKDIR /workspace From 6cca7042da059ddfbec16ff35a52139f424fc47e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 21:32:34 -0700 Subject: [PATCH 14/40] Fix docker build --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index ea32941903..173c0ff2a4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -184,6 +184,7 @@ COPY fastcache/go.mod fastcache/go.sum fastcache/ RUN go mod download COPY . ./ COPY --from=contracts-builder workspace/contracts/build/ contracts/build/ +COPY --from=contracts-builder workspace/contracts/out/ contracts/out/ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/ COPY --from=contracts-builder workspace/.make/ .make/ COPY --from=prover-header-export / target/ From 609b4cf43a2279703282cdd779c6ac01fa106868 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 22 Jan 2024 23:59:07 -0700 Subject: [PATCH 15/40] Add 4844 blob tx support to data poster (rbf is wip) --- arbnode/batch_poster.go | 1 + arbnode/dataposter/data_poster.go | 178 ++++++++++++++++++++------ arbnode/dataposter/storage/storage.go | 10 +- arbnode/dataposter/storage_test.go | 2 +- staker/validatorwallet/contract.go | 7 +- staker/validatorwallet/eoa.go | 3 +- 6 files changed, 152 insertions(+), 49 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 07034ee6f8..01a84b1c43 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -1071,6 +1071,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) data, gasLimit, new(big.Int), + nil, // TODO: use blobs accessList, ) if err != nil { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 09f3e218b1..425dba8e18 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -23,7 +23,9 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -31,12 +33,14 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/go-redis/redis/v8" + "github.com/holiman/uint256" "github.com/offchainlabs/nitro/arbnode/dataposter/dbstorage" "github.com/offchainlabs/nitro/arbnode/dataposter/noop" "github.com/offchainlabs/nitro/arbnode/dataposter/slice" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -64,6 +68,7 @@ type DataPoster struct { metadataRetriever func(ctx context.Context, blockNum *big.Int) ([]byte, error) extraBacklog func() uint64 parentChainID *big.Int + parentChainID256 *uint256.Int // These fields are protected by the mutex. 
// TODO: factor out these fields into separate structure, since now one @@ -177,6 +182,11 @@ func NewDataPoster(ctx context.Context, opts *DataPosterOpts) (*DataPoster, erro extraBacklog: opts.ExtraBacklog, parentChainID: opts.ParentChainID, } + var overflow bool + dp.parentChainID256, overflow = uint256.FromBig(opts.ParentChainID) + if overflow { + return nil, fmt.Errorf("parent chain ID %v overflows uint256 (necessary for blob transactions)", opts.ParentChainID) + } if dp.extraBacklog == nil { dp.extraBacklog = func() uint64 { return 0 } } @@ -363,7 +373,7 @@ func (p *DataPoster) getNextNonceAndMaybeMeta(ctx context.Context) (uint64, []by return 0, nil, false, fmt.Errorf("fetching last element from queue: %w", err) } if lastQueueItem != nil { - nextNonce := lastQueueItem.Data.Nonce + 1 + nextNonce := lastQueueItem.FullTx.Nonce() + 1 if err := p.canPostWithNonce(ctx, nextNonce); err != nil { return 0, nil, false, err } @@ -442,27 +452,34 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur var big4 = big.NewInt(4) // The dataPosterBacklog argument should *not* include extraBacklog (it's added in in this function) -func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, error) { +func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, numBlobs int, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, dataPosterBacklog uint64) (*big.Int, *big.Int, *big.Int, error) { config := p.config() dataPosterBacklog += p.extraBacklog() latestHeader, err := p.headerReader.LastHeader(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if latestHeader.BaseFee == nil { - return nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) + return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) + } + newBlobFeeCap := big.NewInt(0) + if latestHeader.ExcessBlobGas != nil { + newBlobFeeCap = eip4844.CalcBlobFee(*latestHeader.ExcessBlobGas) + newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) + } else if numBlobs > 0 { + return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing ExcessBlobGas but blobs were specified in data poster transaction (either the parent chain node is not synced or EIP-4844 was improperly activated)", latestHeader.Number) } softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) softConfNonce, err := p.client.NonceAt(ctx, p.Sender(), softConfBlock) if err != nil { - return nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) + return nil, nil, nil, fmt.Errorf("failed to get latest nonce %v blocks ago (block %v): %w", config.NonceRbfSoftConfs, softConfBlock, err) } - newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, big.NewInt(2)) + newFeeCap := new(big.Int).Mul(latestHeader.BaseFee, common.Big2) newFeeCap = arbmath.BigMax(newFeeCap, arbmath.FloatToBig(config.MinFeeCapGwei*params.GWei)) newTipCap, err := p.client.SuggestGasTipCap(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, err } newTipCap = arbmath.BigMax(newTipCap, arbmath.FloatToBig(config.MinTipCapGwei*params.GWei)) 
newTipCap = arbmath.BigMin(newTipCap, arbmath.FloatToBig(config.MaxTipCapGwei*params.GWei))
@@ -481,10 +498,13 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 newFeeCap = arbmath.BigMax(newFeeCap, arbmath.BigMulByBips(lastFeeCap, minRbfIncrease))
 }
 
+ // TODO: if we're significantly increasing the blob fee cap, we also need to increase the fee cap by minRbfIncrease
+ // TODO: look more into geth's blob mempool and make sure this behavior conforms (I think minRbfIncrease might be higher there)
+
 elapsed := time.Since(dataCreatedAt)
 maxFeeCap, err := p.evalMaxFeeCapExpr(dataPosterBacklog, elapsed)
 if err != nil {
- return nil, nil, err
+ return nil, nil, nil, err
 }
 if arbmath.BigGreaterThan(newFeeCap, maxFeeCap) {
 log.Warn(
@@ -496,6 +516,8 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 newFeeCap = maxFeeCap
 }
 
+ // TODO: also have an expression limiting the max blob fee cap
+
 latestBalance := p.balance
 balanceForTx := new(big.Int).Set(latestBalance)
 if config.AllocateMempoolBalance && !p.usingNoOpStorage {
@@ -525,6 +547,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 balanceForTx.Div(balanceForTx, arbmath.UintToBig(config.MaxMempoolTransactions-1))
 }
 }
+ // TODO: take into account blob costs
 balanceFeeCap := arbmath.BigDivByUint(balanceForTx, gasLimit)
 if arbmath.BigGreaterThan(newFeeCap, balanceFeeCap) {
 log.Warn(
@@ -550,10 +573,14 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u
 newTipCap = new(big.Int).Set(newFeeCap)
 }
 
- return newFeeCap, newTipCap, nil
+ return newFeeCap, newTipCap, newBlobFeeCap, nil
 }
 
-func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, accessList types.AccessList) (*types.Transaction, error) {
+func (p *DataPoster) PostSimpleTransaction(ctx context.Context, nonce uint64, to common.Address, calldata []byte, gasLimit uint64, value *big.Int) (*types.Transaction, error) {
+ return p.PostTransaction(ctx, time.Now(), nonce, nil, to, calldata, gasLimit, value, nil, nil)
+}
+
+func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Time, nonce uint64, meta []byte, to common.Address, calldata []byte, gasLimit uint64, value *big.Int, kzgBlobs []kzg4844.Blob, accessList types.AccessList) (*types.Transaction, error) {
 p.mutex.Lock()
 defer p.mutex.Unlock()
 
@@ -570,27 +597,65 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim
 return nil, fmt.Errorf("failed to update data poster balance: %w", err)
 }
 
- feeCap, tipCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, nil, nil, dataCreatedAt, 0)
+ feeCap, tipCap, blobFeeCap, err := p.feeAndTipCaps(ctx, nonce, gasLimit, len(kzgBlobs), nil, nil, dataCreatedAt, 0)
 if err != nil {
 return nil, err
 }
- inner := types.DynamicFeeTx{
- Nonce: nonce,
- GasTipCap: tipCap,
- GasFeeCap: feeCap,
- Gas: gasLimit,
- To: &to,
- Value: value,
- Data: calldata,
- AccessList: accessList,
- ChainID: p.parentChainID,
- }
- fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(&inner))
+
+ var deprecatedData types.DynamicFeeTx
+ var inner types.TxData
+ if len(kzgBlobs) > 0 {
+ value256, overflow := uint256.FromBig(value)
+ if overflow {
+ return nil, fmt.Errorf("blob transaction callvalue %v overflows uint256", value)
+ }
+ // Intentionally break out-of-date data poster redis clients,
+ // so they don't try to replace by fee a tx they don't 
understand + deprecatedData.Nonce = ^uint64(0) + commitments, proofs, blobHashes, err := blobs.ComputeCommitmentsProofsAndHashes(kzgBlobs) + if err != nil { + return nil, fmt.Errorf("failed to compute KZG metadata: %w", err) + } + inner = &types.BlobTx{ + Nonce: nonce, + Gas: gasLimit, + To: to, + Value: value256, + Data: calldata, + Sidecar: &types.BlobTxSidecar{ + Blobs: kzgBlobs, + Commitments: commitments, + Proofs: proofs, + }, + BlobHashes: blobHashes, + AccessList: accessList, + ChainID: p.parentChainID256, + } + // reuse the code to convert gas fee and tip caps to uint256s + inner, err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap) + if err != nil { + return nil, err + } + } else { + deprecatedData = types.DynamicFeeTx{ + Nonce: nonce, + GasFeeCap: feeCap, + GasTipCap: tipCap, + Gas: gasLimit, + To: &to, + Value: value, + Data: calldata, + AccessList: accessList, + ChainID: p.parentChainID, + } + inner = &deprecatedData + } + fullTx, err := p.signer(ctx, p.Sender(), types.NewTx(inner)) if err != nil { return nil, fmt.Errorf("signing transaction: %w", err) } queuedTx := storage.QueuedTransaction{ - Data: inner, + DeprecatedData: deprecatedData, FullTx: fullTx, Meta: meta, Sent: false, @@ -603,8 +668,8 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim // the mutex must be held by the caller func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTransaction) error { if prevTx != nil { - if prevTx.Data.Nonce != newTx.Data.Nonce { - return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) + if prevTx.FullTx.Nonce() != newTx.FullTx.Nonce() { + return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.FullTx.Nonce(), newTx.FullTx.Nonce()) } // Check if prevTx is the same as newTx and we don't need to do anything @@ -621,7 +686,7 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr return nil } } - if err := p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx); err != nil { + if err := p.queue.Put(ctx, newTx.FullTx.Nonce(), prevTx, newTx); err != nil { return fmt.Errorf("putting new tx in the queue: %w", err) } return nil @@ -645,22 +710,57 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return p.saveTx(ctx, newTx, &newerTx) } +func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (types.TxData, error) { + switch data := data.(type) { + case *types.DynamicFeeTx: + data.GasFeeCap = newFeeCap + data.GasTipCap = newTipCap + return data, nil + case *types.BlobTx: + var overflow bool + data.GasFeeCap, overflow = uint256.FromBig(newFeeCap) + if overflow { + return nil, fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap) + } + data.GasTipCap, overflow = uint256.FromBig(newTipCap) + if overflow { + return nil, fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap) + } + data.BlobFeeCap, overflow = uint256.FromBig(newBlobFeeCap) + if overflow { + return nil, fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap) + } + return data, nil + default: + return nil, fmt.Errorf("unexpected transaction data type %T", data) + } +} + +func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (*types.Transaction, error) { + data, err := updateTxDataGasCaps(tx.GetInner(), newFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + return nil, err + } + return types.NewTx(data), nil +} + // The mutex must be held by the caller. 
func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransaction, backlogOfBatches uint64) error { - newFeeCap, newTipCap, err := p.feeAndTipCaps(ctx, prevTx.Data.Nonce, prevTx.Data.Gas, prevTx.Data.GasFeeCap, prevTx.Data.GasTipCap, prevTx.Created, backlogOfBatches) + newFeeCap, newTipCap, newBlobFeeCap, err := p.feeAndTipCaps(ctx, prevTx.FullTx.Nonce(), prevTx.FullTx.Gas(), len(prevTx.FullTx.BlobHashes()), prevTx.FullTx.GasFeeCap(), prevTx.FullTx.GasTipCap(), prevTx.Created, backlogOfBatches) if err != nil { return err } - minNewFeeCap := arbmath.BigMulByBips(prevTx.Data.GasFeeCap, minRbfIncrease) + minNewFeeCap := arbmath.BigMulByBips(prevTx.FullTx.GasFeeCap(), minRbfIncrease) newTx := *prevTx + // TODO: also look at the blob fee cap if newFeeCap.Cmp(minNewFeeCap) < 0 { log.Debug( "no need to replace by fee transaction", - "nonce", prevTx.Data.Nonce, - "lastFeeCap", prevTx.Data.GasFeeCap, + "nonce", prevTx.FullTx.Nonce(), + "lastFeeCap", prevTx.FullTx.GasFeeCap(), "recommendedFeeCap", newFeeCap, - "lastTipCap", prevTx.Data.GasTipCap, + "lastTipCap", prevTx.FullTx.GasTipCap(), "recommendedTipCap", newTipCap, ) newTx.NextReplacement = time.Now().Add(time.Minute) @@ -676,9 +776,13 @@ func (p *DataPoster) replaceTx(ctx context.Context, prevTx *storage.QueuedTransa break } newTx.Sent = false - newTx.Data.GasFeeCap = newFeeCap - newTx.Data.GasTipCap = newTipCap - newTx.FullTx, err = p.signer(ctx, p.Sender(), types.NewTx(&newTx.Data)) + newTx.DeprecatedData.GasFeeCap = newFeeCap + newTx.DeprecatedData.GasTipCap = newTipCap + unsignedTx, err := updateGasCaps(newTx.FullTx, newFeeCap, newTipCap, newBlobFeeCap) + if err != nil { + return err + } + newTx.FullTx, err = p.signer(ctx, p.Sender(), unsignedTx) if err != nil { return err } @@ -750,7 +854,7 @@ func (p *DataPoster) updateBalance(ctx context.Context) error { const maxConsecutiveIntermittentErrors = 10 func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg string) { - nonce := tx.Data.Nonce + nonce := tx.FullTx.Nonce() if err == nil { delete(p.errorCount, nonce) return @@ -764,7 +868,7 @@ func (p *DataPoster) maybeLogError(err error, tx *storage.QueuedTransaction, msg } else { delete(p.errorCount, nonce) } - logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.Data.GasFeeCap, "tipCap", tx.Data.GasTipCap, "gas", tx.Data.Gas) + logLevel(msg, "err", err, "nonce", nonce, "feeCap", tx.FullTx.GasFeeCap(), "tipCap", tx.FullTx.GasTipCap(), "gas", tx.FullTx.Gas()) } const minWait = time.Second * 10 diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index a9e78fcc58..9586b9c9a9 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -27,7 +27,7 @@ var ( type QueuedTransaction struct { FullTx *types.Transaction - Data types.DynamicFeeTx + DeprecatedData types.DynamicFeeTx // FullTx should be used instead Meta []byte Sent bool Created time.Time // may be earlier than the tx was given to the tx poster @@ -46,7 +46,7 @@ type queuedTransactionForEncoding struct { func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { return rlp.Encode(w, queuedTransactionForEncoding{ FullTx: qt.FullTx, - Data: qt.Data, + Data: qt.DeprecatedData, Meta: qt.Meta, Sent: qt.Sent, Created: (RlpTime)(qt.Created), @@ -60,7 +60,7 @@ func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { return err } qt.FullTx = qtEnc.FullTx - qt.Data = qtEnc.Data + qt.DeprecatedData = qtEnc.Data qt.Meta = qtEnc.Meta qt.Sent = qtEnc.Sent qt.Created = 
time.Time(qtEnc.Created) @@ -107,7 +107,7 @@ func LegacyToQueuedTransaction(legacyQT *LegacyQueuedTransaction) (*QueuedTransa } return &QueuedTransaction{ FullTx: legacyQT.FullTx, - Data: legacyQT.Data, + DeprecatedData: legacyQT.Data, Meta: meta, Sent: legacyQT.Sent, Created: legacyQT.Created, @@ -127,7 +127,7 @@ func QueuedTransactionToLegacy(qt *QueuedTransaction) (*LegacyQueuedTransaction, } return &LegacyQueuedTransaction{ FullTx: qt.FullTx, - Data: qt.Data, + Data: qt.DeprecatedData, Meta: meta, Sent: qt.Sent, Created: qt.Created, diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index cf9918941e..f98c120f38 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -84,7 +84,7 @@ func valueOf(t *testing.T, i int) *storage.QueuedTransaction { big.NewInt(int64(i)), []byte{byte(i)}), Meta: meta, - Data: types.DynamicFeeTx{ + DeprecatedData: types.DynamicFeeTx{ ChainID: big.NewInt(int64(i)), Nonce: uint64(i), GasTipCap: big.NewInt(int64(i)), diff --git a/staker/validatorwallet/contract.go b/staker/validatorwallet/contract.go index 774e9ab407..deed7942ab 100644 --- a/staker/validatorwallet/contract.go +++ b/staker/validatorwallet/contract.go @@ -10,7 +10,6 @@ import ( "math/big" "strings" "sync/atomic" - "time" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" @@ -177,7 +176,7 @@ func (v *Contract) executeTransaction(ctx context.Context, tx *types.Transaction if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) + return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } func (v *Contract) populateWallet(ctx context.Context, createIfMissing bool) error { @@ -288,7 +287,7 @@ func (v *Contract) ExecuteTransactions(ctx context.Context, builder *txbuilder.B if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - arbTx, err := v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), txData, gas, auth.Value, nil) + arbTx, err := v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), txData, gas, auth.Value) if err != nil { return nil, err } @@ -338,7 +337,7 @@ func (v *Contract) TimeoutChallenges(ctx context.Context, challenges []uint64) ( if err != nil { return nil, fmt.Errorf("getting gas for tx data: %w", err) } - return v.dataPoster.PostTransaction(ctx, time.Now(), auth.Nonce.Uint64(), nil, *v.Address(), data, gas, auth.Value, nil) + return v.dataPoster.PostSimpleTransaction(ctx, auth.Nonce.Uint64(), *v.Address(), data, gas, auth.Value) } // gasForTxData returns auth.GasLimit if it's nonzero, otherwise returns estimate. 
diff --git a/staker/validatorwallet/eoa.go b/staker/validatorwallet/eoa.go index 44af5e2b60..3ae305b36c 100644 --- a/staker/validatorwallet/eoa.go +++ b/staker/validatorwallet/eoa.go @@ -6,7 +6,6 @@ package validatorwallet import ( "context" "fmt" - "time" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -95,7 +94,7 @@ func (w *EOA) postTransaction(ctx context.Context, baseTx *types.Transaction) (* return nil, err } gas := baseTx.Gas() + w.getExtraGas() - newTx, err := w.dataPoster.PostTransaction(ctx, time.Now(), nonce, nil, *baseTx.To(), baseTx.Data(), gas, baseTx.Value(), nil) + newTx, err := w.dataPoster.PostSimpleTransaction(ctx, nonce, *baseTx.To(), baseTx.Data(), gas, baseTx.Value()) if err != nil { return nil, fmt.Errorf("post transaction: %w", err) } From d3d0e0fb1270ce82f82a4a12c57b589fa18e24b7 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 00:25:54 -0700 Subject: [PATCH 16/40] Add support for 4844 batch posting --- arbnode/batch_poster.go | 138 ++++++++++++++++++++++++------ arbnode/dataposter/data_poster.go | 10 ++- arbutil/wait_for_l1.go | 2 + util/blobs/blobs.go | 28 ++++-- 4 files changed, 140 insertions(+), 38 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 01a84b1c43..2dc9bac340 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -18,15 +18,18 @@ import ( "github.com/andybalholm/brotli" "github.com/spf13/pflag" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/storage" @@ -40,6 +43,7 @@ import ( "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/util" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -48,7 +52,16 @@ import ( var ( batchPosterWalletBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/wallet/balanceether", nil) batchPosterGasRefunderBalance = metrics.NewRegisteredGaugeFloat64("arb/batchposter/gasrefunder/balanceether", nil) + + usableBytesInBlob = big.NewInt(int64(len(kzg4844.Blob{}) * 31 / 32)) + blobTxBlobGasPerBlob = big.NewInt(params.BlobTxBlobGasPerBlob) +) + +const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" + + sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlob" // TODO: this will probably get updated to have a plural name ) type batchPosterPosition struct { @@ -119,6 +132,7 @@ type BatchPosterConfig struct { RedisUrl string `koanf:"redis-url"` RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` + Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` ParentChainWallet genericconf.WalletConfig 
`koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -166,6 +180,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Duration(prefix+".das-retention-period", DefaultBatchPosterConfig.DASRetentionPeriod, "In AnyTrust mode, the period which DASes are requested to retain the stored batches.") f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") + f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -188,6 +203,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 50_000, + Post4844Blobs: true, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -215,6 +231,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ DASRetentionPeriod: time.Hour * 24 * 15, GasRefunderAddress: "", ExtraBatchGas: 10_000, + Post4844Blobs: true, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -753,30 +770,73 @@ func (s *batchSegments) CloseAndGetBytes() ([]byte, error) { return fullMsg, nil } -func (b *BatchPoster) encodeAddBatch(seqNum *big.Int, prevMsgNum arbutil.MessageIndex, newMsgNum arbutil.MessageIndex, message []byte, delayedMsg uint64) ([]byte, error) { - method, ok := b.seqInboxABI.Methods["addSequencerL2BatchFromOrigin0"] +func (b *BatchPoster) encodeAddBatch( + seqNum *big.Int, + prevMsgNum arbutil.MessageIndex, + newMsgNum arbutil.MessageIndex, + l2MessageData []byte, + delayedMsg uint64, + use4844 bool, +) ([]byte, []kzg4844.Blob, error) { + methodName := sequencerBatchPostMethodName + if use4844 { + methodName = sequencerBatchPostWithBlobsMethodName + } + method, ok := b.seqInboxABI.Methods[methodName] if !ok { - return nil, errors.New("failed to find add batch method") - } - inputData, err := method.Inputs.Pack( - seqNum, - message, - new(big.Int).SetUint64(delayedMsg), - b.config().gasRefunder, - new(big.Int).SetUint64(uint64(prevMsgNum)), - new(big.Int).SetUint64(uint64(newMsgNum)), - ) + return nil, nil, errors.New("failed to find add batch method") + } + var calldata []byte + var kzgBlobs []kzg4844.Blob + var err error + if use4844 { + kzgBlobs, err = blobs.EncodeBlobs(l2MessageData) + if err != nil { + return nil, nil, fmt.Errorf("failed to encode blobs: %w", err) + } + // EIP4844 transactions to the sequencer inbox will not use transaction calldata for L2 info. 
+ calldata, err = method.Inputs.Pack( + seqNum, + new(big.Int).SetUint64(delayedMsg), + b.config().gasRefunder, + new(big.Int).SetUint64(uint64(prevMsgNum)), + new(big.Int).SetUint64(uint64(newMsgNum)), + ) + } else { + calldata, err = method.Inputs.Pack( + seqNum, + l2MessageData, + new(big.Int).SetUint64(delayedMsg), + b.config().gasRefunder, + new(big.Int).SetUint64(uint64(prevMsgNum)), + new(big.Int).SetUint64(uint64(newMsgNum)), + ) + } if err != nil { - return nil, err + return nil, nil, err } - fullData := append([]byte{}, method.ID...) - fullData = append(fullData, inputData...) - return fullData, nil + fullCalldata := append([]byte{}, method.ID...) + fullCalldata = append(fullCalldata, calldata...) + return fullCalldata, kzgBlobs, nil } var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") -func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realNonce uint64, realAccessList types.AccessList) (uint64, error) { +type estimateGasParams struct { + From common.Address `json:"from"` + To *common.Address `json:"to"` + Data []byte `json:"data"` + AccessList types.AccessList `json:"accessList"` + BlobHashes []common.Hash `json:"blobVersionedHashes"` +} + +func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { + var gas uint64 + err := client.CallContext(ctx, &gas, "eth_estimateGas", params) + return gas, err +} + +func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { config := b.config() useNormalEstimation := b.dataPoster.MaxMempoolTransactions() == 1 if !useNormalEstimation { @@ -787,12 +847,18 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, } useNormalEstimation = latestNonce == realNonce } + rawRpcClient := b.l1Reader.Client().Client() if useNormalEstimation { + _, realBlobHashes, err := blobs.ComputeCommitmentsAndHashes(realBlobs) + if err != nil { + return 0, fmt.Errorf("failed to compute real blob commitments: %w", err) + } // If we're at the latest nonce, we can skip the special future tx estimate stuff - gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ + gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ From: b.dataPoster.Sender(), To: &b.seqInboxAddr, Data: realData, + BlobHashes: realBlobHashes, AccessList: realAccessList, }) if err != nil { @@ -805,14 +871,19 @@ func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, // However, we set nextMsgNum to 1 because it is necessary for a correct estimation for the final to be non-zero. // Because we're likely estimating against older state, this might not be the actual next message, // but the gas used should be the same. 
- data, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages) + data, kzgBlobs, err := b.encodeAddBatch(abi.MaxUint256, 0, 1, sequencerMessage, delayedMessages, len(realBlobs) > 0) if err != nil { return 0, err } - gas, err := b.l1Reader.Client().EstimateGas(ctx, ethereum.CallMsg{ - From: b.dataPoster.Sender(), - To: &b.seqInboxAddr, - Data: data, + _, blobHashes, err := blobs.ComputeCommitmentsAndHashes(kzgBlobs) + if err != nil { + return 0, fmt.Errorf("failed to compute blob commitments: %w", err) + } + gas, err := estimateGas(rawRpcClient, ctx, estimateGasParams{ + From: b.dataPoster.Sender(), + To: &b.seqInboxAddr, + Data: data, + BlobHashes: blobHashes, // This isn't perfect because we're probably estimating the batch at a different sequence number, // but it should overestimate rather than underestimate which is fine. AccessList: realAccessList, @@ -1039,7 +1110,20 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } - data, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg) + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + var use4844 bool + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, use4844) if err != nil { return false, err } @@ -1051,7 +1135,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) // In theory, this might reduce gas usage, but only by a factor that's already // accounted for in `config.ExtraBatchGas`, as that same factor can appear if a user // posts a new delayed message that we didn't see while gas estimating. 
- gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, nonce, accessList) + gasLimit, err := b.estimateGas(ctx, sequencerMsg, lastPotentialMsg.DelayedMessagesRead, data, kzgBlobs, nonce, accessList) if err != nil { return false, err } @@ -1071,7 +1155,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) data, gasLimit, new(big.Int), - nil, // TODO: use blobs + kzgBlobs, accessList, ) if err != nil { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 425dba8e18..ba9c278ba5 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -464,7 +464,7 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u } newBlobFeeCap := big.NewInt(0) if latestHeader.ExcessBlobGas != nil { - newBlobFeeCap = eip4844.CalcBlobFee(*latestHeader.ExcessBlobGas) + newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) } else if numBlobs > 0 { return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing ExcessBlobGas but blobs were specified in data poster transaction (either the parent chain node is not synced or EIP-4844 was improperly activated)", latestHeader.Number) @@ -612,9 +612,13 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim // Intentionally break out of date data poster redis clients, // so they don't try to replace by fee a tx they don't understand deprecatedData.Nonce = ^uint64(0) - commitments, proofs, blobHashes, err := blobs.ComputeCommitmentsProofsAndHashes(kzgBlobs) + commitments, blobHashes, err := blobs.ComputeCommitmentsAndHashes(kzgBlobs) if err != nil { - return nil, fmt.Errorf("failed to compute KZG metadata: %w", err) + return nil, fmt.Errorf("failed to compute KZG commitments: %w", err) + } + proofs, err := blobs.ComputeBlobProofs(kzgBlobs, commitments) + if err != nil { + return nil, fmt.Errorf("failed to compute KZG proofs: %w", err) } inner = &types.BlobTx{ Nonce: nonce, diff --git a/arbutil/wait_for_l1.go b/arbutil/wait_for_l1.go index b66710dbf0..9fb2cd10f8 100644 --- a/arbutil/wait_for_l1.go +++ b/arbutil/wait_for_l1.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/rpc" ) type L1Interface interface { @@ -25,6 +26,7 @@ type L1Interface interface { BlockNumber(ctx context.Context) (uint64, error) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) ChainID(ctx context.Context) (*big.Int, error) + Client() rpc.ClientInterface } func SendTxAsCall(ctx context.Context, client L1Interface, tx *types.Transaction, from common.Address, blockNum *big.Int, unlimitedGas bool) ([]byte, error) { diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index c8025dc253..60cc898751 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -5,6 +5,7 @@ package blobs import ( "crypto/sha256" + "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -52,20 +53,15 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { } // Return KZG commitments, proofs, and versioned hashes that corresponds to these blobs -func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) { +func ComputeCommitmentsAndHashes(blobs 
[]kzg4844.Blob) ([]kzg4844.Commitment, []common.Hash, error) { commitments := make([]kzg4844.Commitment, len(blobs)) - proofs := make([]kzg4844.Proof, len(blobs)) versionedHashes := make([]common.Hash, len(blobs)) for i := range blobs { var err error commitments[i], err = kzg4844.BlobToCommitment(blobs[i]) if err != nil { - return nil, nil, nil, err - } - proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) - if err != nil { - return nil, nil, nil, err + return nil, nil, err } // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. hash := sha256.Sum256(commitments[i][:]) @@ -73,5 +69,21 @@ func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitme versionedHashes[i] = hash } - return commitments, proofs, versionedHashes, nil + return commitments, versionedHashes, nil +} + +func ComputeBlobProofs(blobs []kzg4844.Blob, commitments []kzg4844.Commitment) ([]kzg4844.Proof, error) { + if len(blobs) != len(commitments) { + return nil, fmt.Errorf("ComputeBlobProofs got %v blobs but %v commitments", len(blobs), len(commitments)) + } + proofs := make([]kzg4844.Proof, len(blobs)) + for i := range blobs { + var err error + proofs[i], err = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) + if err != nil { + return nil, err + } + } + + return proofs, nil } From d229f3c306fb89f5c7193ebbbeb2b8aa165433ce Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 00:30:51 -0700 Subject: [PATCH 17/40] Use updated contracts --- arbnode/batch_poster.go | 2 +- contracts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 2dc9bac340..87170caa8a 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -61,7 +61,7 @@ const ( batchPosterSimpleRedisLockKey = "node.batch-poster.redis-lock.simple-lock-key" sequencerBatchPostMethodName = "addSequencerL2BatchFromOrigin0" - sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlob" // TODO: this will probably get updated to have a plural name + sequencerBatchPostWithBlobsMethodName = "addSequencerL2BatchFromBlobs" ) type batchPosterPosition struct { diff --git a/contracts b/contracts index a8e7709bfc..00d4d62578 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit a8e7709bfc918f9b8e2888d47f2fd8454779fd11 +Subproject commit 00d4d6257835ba58bb381ce8d884a819d7ce9448 From eba35e5dca4dbddfec6759d2a68fc2df91fcaa02 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 12:53:25 -0700 Subject: [PATCH 18/40] Fix bit checks --- arbstate/das_reader.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arbstate/das_reader.go b/arbstate/das_reader.go index 5f568605b1..46d01b7bb1 100644 --- a/arbstate/das_reader.go +++ b/arbstate/das_reader.go @@ -46,20 +46,24 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5 // BrotliMessageHeaderByte indicates that the message is brotli-compressed. 
const BrotliMessageHeaderByte byte = 0 +func hasBits(checking byte, bits byte) bool { + return (checking & bits) == bits +} + func IsDASMessageHeaderByte(header byte) bool { - return (DASMessageHeaderFlag & header) > 0 + return hasBits(header, DASMessageHeaderFlag) } func IsTreeDASMessageHeaderByte(header byte) bool { - return (TreeDASMessageHeaderFlag & header) > 0 + return hasBits(header, TreeDASMessageHeaderFlag) } func IsZeroheavyEncodedHeaderByte(header byte) bool { - return (ZeroheavyMessageHeaderFlag & header) > 0 + return hasBits(header, ZeroheavyMessageHeaderFlag) } func IsBlobHashesHeaderByte(header byte) bool { - return (BlobHashesHeaderFlag & header) > 0 + return hasBits(header, BlobHashesHeaderFlag) } func IsBrotliMessageHeaderByte(b uint8) bool { From 863911649278001d7324e4a000be0f02e871fbb0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 12:26:19 -0700 Subject: [PATCH 19/40] Fix Data field type in estimateGasParams --- arbnode/batch_poster.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 87170caa8a..b4bf4c807b 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg4844" @@ -825,7 +826,7 @@ var ErrNormalGasEstimationFailed = errors.New("normal gas estimation failed") type estimateGasParams struct { From common.Address `json:"from"` To *common.Address `json:"to"` - Data []byte `json:"data"` + Data hexutil.Bytes `json:"data"` AccessList types.AccessList `json:"accessList"` BlobHashes []common.Hash `json:"blobVersionedHashes"` } From 5cdcde12efbce6fe425df4bdebd7cef842135892 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 16:14:32 -0700 Subject: [PATCH 20/40] Fix raw estimateGas in batch poster --- arbnode/batch_poster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index b4bf4c807b..28c248043a 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -832,9 +832,9 @@ type estimateGasParams struct { } func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { - var gas uint64 + var gas hexutil.Uint64 err := client.CallContext(ctx, &gas, "eth_estimateGas", params) - return gas, err + return uint64(gas), err } func (b *BatchPoster) estimateGas(ctx context.Context, sequencerMessage []byte, delayedMessages uint64, realData []byte, realBlobs []kzg4844.Blob, realNonce uint64, realAccessList types.AccessList) (uint64, error) { From 54ee5c31ffdba4119a124dcd9bce09813f670e52 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 16:14:49 -0700 Subject: [PATCH 21/40] Misc refactors while I was tracking down the previous bug --- arbnode/batch_poster.go | 11 ++++++----- arbnode/dataposter/data_poster.go | 19 ++++++++++--------- system_tests/batch_poster_test.go | 2 +- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 28c248043a..53d3e7f403 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -828,7 +828,7 @@ type estimateGasParams struct { To *common.Address `json:"to"` Data 
hexutil.Bytes `json:"data"` AccessList types.AccessList `json:"accessList"` - BlobHashes []common.Hash `json:"blobVersionedHashes"` + BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"` } func estimateGas(client rpc.ClientInterface, ctx context.Context, params estimateGasParams) (uint64, error) { @@ -1164,12 +1164,13 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } log.Info( "BatchPoster: batch sent", - "sequence nr.", batchPosition.NextSeqNum, + "sequenceNumber", batchPosition.NextSeqNum, "from", batchPosition.MessageCount, "to", b.building.msgCount, - "prev delayed", batchPosition.DelayedMessageCount, - "current delayed", b.building.segments.delayedMsg, - "total segments", len(b.building.segments.rawSegments), + "prevDelayed", batchPosition.DelayedMessageCount, + "currentDelayed", b.building.segments.delayedMsg, + "totalSegments", len(b.building.segments.rawSegments), + "numBlobs", len(kzgBlobs), ) recentlyHitL1Bounds := time.Since(b.lastHitL1Bounds) < config.PollInterval*3 postedMessages := b.building.msgCount - batchPosition.MessageCount diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index ba9c278ba5..4f3f514d11 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -636,7 +636,7 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim ChainID: p.parentChainID256, } // reuse the code to convert gas fee and tip caps to uint256s - inner, err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap) + err = updateTxDataGasCaps(inner, feeCap, tipCap, blobFeeCap) if err != nil { return nil, err } @@ -714,34 +714,35 @@ func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransacti return p.saveTx(ctx, newTx, &newerTx) } -func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (types.TxData, error) { +func updateTxDataGasCaps(data types.TxData, newFeeCap, newTipCap, newBlobFeeCap *big.Int) error { switch data := data.(type) { case *types.DynamicFeeTx: data.GasFeeCap = newFeeCap data.GasTipCap = newTipCap - return data, nil + return nil case *types.BlobTx: var overflow bool data.GasFeeCap, overflow = uint256.FromBig(newFeeCap) if overflow { - return nil, fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap) + return fmt.Errorf("blob tx fee cap %v exceeds uint256", newFeeCap) } data.GasTipCap, overflow = uint256.FromBig(newTipCap) if overflow { - return nil, fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap) + return fmt.Errorf("blob tx tip cap %v exceeds uint256", newTipCap) } data.BlobFeeCap, overflow = uint256.FromBig(newBlobFeeCap) if overflow { - return nil, fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap) + return fmt.Errorf("blob tx blob fee cap %v exceeds uint256", newBlobFeeCap) } - return data, nil + return nil default: - return nil, fmt.Errorf("unexpected transaction data type %T", data) + return fmt.Errorf("unexpected transaction data type %T", data) } } func updateGasCaps(tx *types.Transaction, newFeeCap, newTipCap, newBlobFeeCap *big.Int) (*types.Transaction, error) { - data, err := updateTxDataGasCaps(tx.GetInner(), newFeeCap, newTipCap, newBlobFeeCap) + data := tx.GetInner() + err := updateTxDataGasCaps(data, newFeeCap, newTipCap, newBlobFeeCap) if err != nil { return nil, err } diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index f7bf74f699..cacbe3cee4 100644 --- a/system_tests/batch_poster_test.go +++ 
b/system_tests/batch_poster_test.go @@ -180,7 +180,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { } lastTxHash := txs[len(txs)-1].Hash() - for i := 90; i > 0; i-- { + for i := 90; i >= 0; i-- { builder.L1.SendWaitTestTransactions(t, []*types.Transaction{ builder.L1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), }) From 4925e63acaafc651c64302062f976cd9d4c754fe Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 16:23:23 -0700 Subject: [PATCH 22/40] Add option to force posting 4844 blobs --- arbnode/batch_poster.go | 20 ++++++++++++++------ arbnode/dataposter/data_poster.go | 8 ++++++-- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 53d3e7f403..4a07d36521 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -134,6 +134,7 @@ type BatchPosterConfig struct { RedisLock redislock.SimpleCfg `koanf:"redis-lock" reload:"hot"` ExtraBatchGas uint64 `koanf:"extra-batch-gas" reload:"hot"` Post4844Blobs bool `koanf:"post-4844-blobs" reload:"hot"` + ForcePost4844Blobs bool `koanf:"force-post-4844-blobs" reload:"hot"` ParentChainWallet genericconf.WalletConfig `koanf:"parent-chain-wallet"` L1BlockBound string `koanf:"l1-block-bound" reload:"hot"` L1BlockBoundBypass time.Duration `koanf:"l1-block-bound-bypass" reload:"hot"` @@ -182,6 +183,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".gas-refunder-address", DefaultBatchPosterConfig.GasRefunderAddress, "The gas refunder contract address (optional)") f.Uint64(prefix+".extra-batch-gas", DefaultBatchPosterConfig.ExtraBatchGas, "use this much more gas than estimation says is necessary to post batches") f.Bool(prefix+".post-4844-blobs", DefaultBatchPosterConfig.Post4844Blobs, "if the parent chain supports 4844 blobs and they're well priced, post EIP-4844 blobs") + f.Bool(prefix+".force-post-4844-blobs", DefaultBatchPosterConfig.ForcePost4844Blobs, "if the parent chain supports 4844 blobs and post-4844-blobs is true, post 4844 blobs even if it's not price efficient") f.String(prefix+".redis-url", DefaultBatchPosterConfig.RedisUrl, "if non-empty, the Redis URL to store queued transactions in") f.String(prefix+".l1-block-bound", DefaultBatchPosterConfig.L1BlockBound, "only post messages to batches when they're within the max future block/timestamp as of this L1 block tag (\"safe\", \"finalized\", \"latest\", or \"ignore\" to ignore this check)") f.Duration(prefix+".l1-block-bound-bypass", DefaultBatchPosterConfig.L1BlockBoundBypass, "post batches even if not within the layer 1 future bounds if we're within this margin of the max delay") @@ -205,6 +207,7 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 50_000, Post4844Blobs: true, + ForcePost4844Blobs: false, DataPoster: dataposter.DefaultDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -233,6 +236,7 @@ var TestBatchPosterConfig = BatchPosterConfig{ GasRefunderAddress: "", ExtraBatchGas: 10_000, Post4844Blobs: true, + ForcePost4844Blobs: false, DataPoster: dataposter.TestDataPosterConfig, ParentChainWallet: DefaultBatchPosterL1WalletConfig, L1BlockBound: "", @@ -1116,13 +1120,17 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, err } var use4844 bool - if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil { - blobFeePerByte := 
eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.ForcePost4844Blobs { + use4844 = true + } else { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } } data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, use4844) if err != nil { diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 4f3f514d11..1415f78140 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -463,11 +463,15 @@ func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit u return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing BaseFee (either the parent chain does not have EIP-1559 or the parent chain node is not synced)", latestHeader.Number) } newBlobFeeCap := big.NewInt(0) - if latestHeader.ExcessBlobGas != nil { + if latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { newBlobFeeCap = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) newBlobFeeCap.Mul(newBlobFeeCap, common.Big2) } else if numBlobs > 0 { - return nil, nil, nil, fmt.Errorf("latest parent chain block %v missing ExcessBlobGas but blobs were specified in data poster transaction (either the parent chain node is not synced or EIP-4844 was improperly activated)", latestHeader.Number) + return nil, nil, nil, fmt.Errorf( + "latest parent chain block %v missing ExcessBlobGas or BlobGasUsed but blobs were specified in data poster transaction "+ + "(either the parent chain node is not synced or the EIP-4844 was improperly activated)", + latestHeader.Number, + ) } softConfBlock := arbmath.BigSubByUint(latestHeader.Number, config.NonceRbfSoftConfs) softConfNonce, err := p.client.NonceAt(ctx, p.Sender(), softConfBlock) From b6b54ef82b6f803fc5998777ffc138c23a4faa59 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 17:35:49 -0700 Subject: [PATCH 23/40] Use custom beacon request helper --- arbnode/blob_reader.go | 158 ++++++++++++++++++----------------- arbnode/node.go | 8 +- util/beaconclient/client.go | 98 ---------------------- util/beaconclient/errors.go | 40 --------- util/beaconclient/options.go | 48 ----------- util/blobs/blobs.go | 12 ++- 6 files changed, 90 insertions(+), 274 deletions(-) delete mode 100644 util/beaconclient/client.go delete mode 100644 util/beaconclient/errors.go delete mode 100644 util/beaconclient/options.go diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index 673df37b1f..d7560f47e4 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -5,24 +5,26 @@ package arbnode import ( "context" - "crypto/sha256" "encoding/json" "fmt" + "io" + "net/http" + "path" 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/util/beaconclient" + "github.com/offchainlabs/nitro/util/blobs" "github.com/offchainlabs/nitro/util/pretty" - "github.com/pkg/errors" "github.com/spf13/pflag" ) type BlobClient struct { - bc *beaconclient.Client - ec arbutil.L1Interface + config BlobClientConfig + ec arbutil.L1Interface + httpClient *http.Client // The genesis time time won't change so only request it once. cachedGenesisTime uint64 @@ -40,8 +42,45 @@ func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") } -func NewBlobClient(bc *beaconclient.Client, ec arbutil.L1Interface) *BlobClient { - return &BlobClient{bc: bc, ec: ec} +func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) *BlobClient { + return &BlobClient{ + config: config, + ec: ec, + httpClient: &http.Client{}, + } +} + +type fullResult[T any] struct { + Data T `json:"data"` +} + +func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath string) (T, error) { + // Unfortunately, methods on a struct can't be generic. + + var empty T + + req, err := http.NewRequestWithContext(ctx, "GET", path.Join(b.config.BeaconChainUrl, beaconPath), http.NoBody) + if err != nil { + return empty, err + } + + resp, err := b.httpClient.Do(req) + if err != nil { + return empty, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return empty, err + } + + var full fullResult[T] + if err := json.Unmarshal(body, &full); err != nil { + return empty, err + } + + return full.Data, nil } // Get all the blobs associated with a particular block. 
@@ -62,58 +101,48 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio return b.blobSidecars(ctx, slot, versionedHashes) } -type blobResponse struct { - Data []blobResponseItem `json:"data"` -} type blobResponseItem struct { - BlockRoot string `json:"block_root"` - Index int `json:"index"` - Slot uint64 `json:"slot"` - BlockParentRoot string `json:"block_parent_root"` - ProposerIndex uint64 `json:"proposer_index"` - Blob string `json:"blob"` - KzgCommitment string `json:"kzg_commitment"` - KzgProof string `json:"kzg_proof"` + BlockRoot string `json:"block_root"` + Index int `json:"index"` + Slot uint64 `json:"slot"` + BlockParentRoot string `json:"block_parent_root"` + ProposerIndex uint64 `json:"proposer_index"` + Blob hexutil.Bytes `json:"blob"` + KzgCommitment hexutil.Bytes `json:"kzg_commitment"` + KzgProof hexutil.Bytes `json:"kzg_proof"` } func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { - body, err := b.bc.Get(ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) + response, err := beaconRequest[[]blobResponseItem](b, ctx, fmt.Sprintf("/eth/v1/beacon/blob_sidecars/%d", slot)) if err != nil { - return nil, errors.Wrap(err, "error calling beacon client in blobSidecars") + return nil, fmt.Errorf("error calling beacon client in blobSidecars: %w", err) } - br := &blobResponse{} - err = json.Unmarshal(body, br) - if err != nil { - return nil, errors.Wrap(err, "error decoding json response in blobSidecars") - } - - if len(br.Data) == 0 { - return nil, fmt.Errorf("no blobs found for slot %d", slot) + if len(response) < len(versionedHashes) { + return nil, fmt.Errorf("expected at least %d blobs for slot %d but only got %d", len(versionedHashes), slot, len(response)) } - blobs := make([]kzg4844.Blob, len(versionedHashes)) - var totalFound int + output := make([]kzg4844.Blob, len(versionedHashes)) + outputsFound := make([]bool, len(versionedHashes)) - for i := range blobs { - commitmentBytes, err := hexutil.Decode(br.Data[i].KzgCommitment) - if err != nil { - return nil, fmt.Errorf("couldn't decode commitment for slot(%d) at index(%d), commitment(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgCommitment)) - } + for _, blobItem := range response { var commitment kzg4844.Commitment - copy(commitment[:], commitmentBytes) - versionedHash := kZGToVersionedHash(commitment) + copy(commitment[:], blobItem.KzgCommitment) + versionedHash := blobs.CommitmentToVersionedHash(commitment) // The versioned hashes of the blob commitments are produced in the by HASH_OPCODE_BYTE, // presumably in the order they were added to the tx. The spec is unclear if the blobs // need to be returned in any particular order from the beacon API, so we put them back in // the order from the tx. 
- var j int + var outputIdx int var found bool - for j = range versionedHashes { - if versionedHashes[j] == versionedHash { + for outputIdx = range versionedHashes { + if versionedHashes[outputIdx] == versionedHash { found = true - totalFound++ + if outputsFound[outputIdx] { + return nil, fmt.Errorf("found blob with versioned hash %v twice", versionedHash) + } + outputsFound[outputIdx] = true break } } @@ -121,30 +150,24 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHas continue } - blob, err := hexutil.Decode(br.Data[i].Blob) - if err != nil { - return nil, fmt.Errorf("couldn't decode blob for slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) - } - copy(blobs[j][:], blob) + copy(output[outputIdx][:], blobItem.Blob) - proofBytes, err := hexutil.Decode(br.Data[i].KzgProof) - if err != nil { - return nil, fmt.Errorf("couldn't decode proof for slot(%d) at index(%d), proof(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].KzgProof)) - } var proof kzg4844.Proof - copy(proof[:], proofBytes) + copy(proof[:], blobItem.KzgProof) - err = kzg4844.VerifyBlobProof(blobs[j], commitment, proof) + err = kzg4844.VerifyBlobProof(output[outputIdx], commitment, proof) if err != nil { - return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, br.Data[i].Index, pretty.FirstFewChars(br.Data[i].Blob)) + return nil, fmt.Errorf("failed to verify blob proof for blob at slot(%d) at index(%d), blob(%s)", slot, blobItem.Index, pretty.FirstFewChars(blobItem.Blob.String())) } } - if totalFound < len(versionedHashes) { - return nil, fmt.Errorf("not all of the requested blobs (%d/%d) were found at slot (%d), can't reconstruct batch payload", totalFound, len(versionedHashes), slot) + for i, found := range outputsFound { + if !found { + return nil, fmt.Errorf("missing blob %v in slot %v, can't reconstruct batch payload", versionedHashes[i], slot) + } } - return blobs, nil + return output, nil } type genesisResponse struct { @@ -157,29 +180,10 @@ func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { return b.cachedGenesisTime, nil } - body, err := b.bc.Get(ctx, "/eth/v1/beacon/genesis") + gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") if err != nil { - return 0, errors.Wrap(err, "error calling beacon client in genesisTime") - } - - gr := &genesisResponse{} - dataWrapper := &struct{ Data *genesisResponse }{Data: gr} - err = json.Unmarshal(body, dataWrapper) - if err != nil { - return 0, errors.Wrap(err, "error decoding json response in genesisTime") + return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) } return gr.GenesisTime, nil } - -// The following code is taken from core/vm/contracts.go -const ( - blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile. 
-) - -func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { - h := sha256.Sum256(kzg[:]) - h[0] = blobCommitmentVersionKZG - - return h -} diff --git a/arbnode/node.go b/arbnode/node.go index 99ecb541ee..9f5626bbaf 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -40,7 +40,6 @@ import ( "github.com/offchainlabs/nitro/solgen/go/rollupgen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/staker/validatorwallet" - "github.com/offchainlabs/nitro/util/beaconclient" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/headerreader" "github.com/offchainlabs/nitro/util/redisutil" @@ -518,12 +517,7 @@ func createNodeImpl( var blobReader arbstate.BlobReader if config.BlobClient.BeaconChainUrl != "" { - bc, err := beaconclient.NewClient(config.BlobClient.BeaconChainUrl) - if err != nil { - return nil, err - } - - blobReader = NewBlobClient(bc, l1client) + blobReader = NewBlobClient(config.BlobClient, l1client) } inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) diff --git a/util/beaconclient/client.go b/util/beaconclient/client.go deleted file mode 100644 index e2dfd8e6bf..0000000000 --- a/util/beaconclient/client.go +++ /dev/null @@ -1,98 +0,0 @@ -package beaconclient - -import ( - "context" - "io" - "net" - "net/http" - "net/url" - - "github.com/pkg/errors" -) - -// Client is a wrapper object around the HTTP client. -// Taken from prysm/api/client. -type Client struct { - hc *http.Client - baseURL *url.URL - token string -} - -// NewClient constructs a new client with the provided options (ex WithTimeout). -// `host` is the base host + port used to construct request urls. This value can be -// a URL string, or NewClient will assume an http endpoint if just `host:port` is used. -func NewClient(host string, opts ...ClientOpt) (*Client, error) { - u, err := urlForHost(host) - if err != nil { - return nil, err - } - c := &Client{ - hc: &http.Client{}, - baseURL: u, - } - for _, o := range opts { - o(c) - } - return c, nil -} - -// Token returns the bearer token used for jwt authentication -func (c *Client) Token() string { - return c.token -} - -// BaseURL returns the base url of the client -func (c *Client) BaseURL() *url.URL { - return c.baseURL -} - -// Do execute the request against the http client -func (c *Client) Do(req *http.Request) (*http.Response, error) { - return c.hc.Do(req) -} - -func urlForHost(h string) (*url.URL, error) { - // try to parse as url (being permissive) - u, err := url.Parse(h) - if err == nil && u.Host != "" { - return u, nil - } - // try to parse as host:port - host, port, err := net.SplitHostPort(h) - if err != nil { - return nil, ErrMalformedHostname - } - return &url.URL{Host: net.JoinHostPort(host, port), Scheme: "http"}, nil -} - -// NodeURL returns a human-readable string representation of the beacon node base url. -func (c *Client) NodeURL() string { - return c.baseURL.String() -} - -// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package. 
-func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) { - u := c.baseURL.ResolveReference(&url.URL{Path: path}) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - for _, o := range opts { - o(req) - } - r, err := c.hc.Do(req) - if err != nil { - return nil, err - } - defer func() { - err = r.Body.Close() - }() - if r.StatusCode != http.StatusOK { - return nil, Non200Err(r) - } - b, err := io.ReadAll(r.Body) - if err != nil { - return nil, errors.Wrap(err, "error reading http response body") - } - return b, nil -} diff --git a/util/beaconclient/errors.go b/util/beaconclient/errors.go deleted file mode 100644 index 7ee88805cd..0000000000 --- a/util/beaconclient/errors.go +++ /dev/null @@ -1,40 +0,0 @@ -package beaconclient - -import ( - "fmt" - "io" - "net/http" - - "github.com/pkg/errors" -) - -// ErrMalformedHostname is used to indicate if a host name's format is incorrect. -var ErrMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500") - -// ErrNotOK is used to indicate when an HTTP request to the API failed with any non-2xx response code. -// More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK. -var ErrNotOK = errors.New("did not receive 2xx response from API") - -// ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API. -var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API") - -// ErrInvalidNodeVersion indicates that the /eth/v1/node/version API response format was not recognized. -var ErrInvalidNodeVersion = errors.New("invalid node version response") - -// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error. -func Non200Err(response *http.Response) error { - bodyBytes, err := io.ReadAll(response.Body) - var body string - if err != nil { - body = "(Unable to read response body.)" - } else { - body = "response body:\n" + string(bodyBytes) - } - msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body) - switch response.StatusCode { - case 404: - return errors.Wrap(ErrNotFound, msg) - default: - return errors.Wrap(ErrNotOK, msg) - } -} diff --git a/util/beaconclient/options.go b/util/beaconclient/options.go deleted file mode 100644 index 98a37e17a0..0000000000 --- a/util/beaconclient/options.go +++ /dev/null @@ -1,48 +0,0 @@ -package beaconclient - -import ( - "fmt" - "net/http" - "time" -) - -// ReqOption is a request functional option. -type ReqOption func(*http.Request) - -// WithSSZEncoding is a request functional option that adds SSZ encoding header. -func WithSSZEncoding() ReqOption { - return func(req *http.Request) { - req.Header.Set("Accept", "application/octet-stream") - } -} - -// WithAuthorizationToken is a request functional option that adds header for authorization token. -func WithAuthorizationToken(token string) ReqOption { - return func(req *http.Request) { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - } -} - -// ClientOpt is a functional option for the Client type (http.Client wrapper) -type ClientOpt func(*Client) - -// WithTimeout sets the .Timeout attribute of the wrapped http.Client. 
-func WithTimeout(timeout time.Duration) ClientOpt { - return func(c *Client) { - c.hc.Timeout = timeout - } -} - -// WithRoundTripper replaces the underlying HTTP's transport with a custom one. -func WithRoundTripper(t http.RoundTripper) ClientOpt { - return func(c *Client) { - c.hc.Transport = t - } -} - -// WithAuthenticationToken sets an oauth token to be used. -func WithAuthenticationToken(token string) ClientOpt { - return func(c *Client) { - c.token = token - } -} diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index c8025dc253..9f6c8d1303 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -51,6 +51,13 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { return outputData, err } +func CommitmentToVersionedHash(commitment kzg4844.Commitment) common.Hash { + // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. + hash := sha256.Sum256(commitment[:]) + hash[0] = 1 + return hash +} + // Return KZG commitments, proofs, and versioned hashes that corresponds to these blobs func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitment, []kzg4844.Proof, []common.Hash, error) { commitments := make([]kzg4844.Commitment, len(blobs)) @@ -67,10 +74,7 @@ func ComputeCommitmentsProofsAndHashes(blobs []kzg4844.Blob) ([]kzg4844.Commitme if err != nil { return nil, nil, nil, err } - // As per the EIP-4844 spec, the versioned hash is the SHA-256 hash of the commitment with the first byte set to 1. - hash := sha256.Sum256(commitments[i][:]) - hash[0] = 1 - versionedHashes[i] = hash + versionedHashes[i] = CommitmentToVersionedHash(commitments[i]) } return commitments, proofs, versionedHashes, nil From 2ba1c490c3fb0fef695ade891e102da167350c0e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 18:14:17 -0700 Subject: [PATCH 24/40] Update contracts to support zero basefee for gas estimation --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 00d4d62578..e253b8b1b5 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 00d4d6257835ba58bb381ce8d884a819d7ce9448 +Subproject commit e253b8b1b5865f135ac63ea3d3cea1bfe8ef2ad7 From 2bd34c65b3d5fba594690dba15be296c27b80454 Mon Sep 17 00:00:00 2001 From: Tristan Wilson Date: Tue, 23 Jan 2024 17:25:51 -0800 Subject: [PATCH 25/40] Fetch seconds per slot from beacon endpoint --- arbnode/blob_reader.go | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index d7560f47e4..02626efff5 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -26,8 +26,9 @@ type BlobClient struct { ec arbutil.L1Interface httpClient *http.Client - // The genesis time time won't change so only request it once. - cachedGenesisTime uint64 + // The genesis time time and seconds per slot won't change so only request them once. 
+ cachedGenesisTime uint64 + cachedSecondsPerSlot uint64 } type BlobClientConfig struct { @@ -89,15 +90,15 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio if err != nil { return nil, err } - genesisTime, err := b.genesisTime(ctx) if err != nil { return nil, err } - - // TODO make denominator configurable for devnets with faster block time - slot := (header.Time - genesisTime) / 12 - + secondsPerSlot, err := b.secondsPerSlot(ctx) + if err != nil { + return nil, err + } + slot := (header.Time - genesisTime) / secondsPerSlot return b.blobSidecars(ctx, slot, versionedHashes) } @@ -179,11 +180,27 @@ func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { if b.cachedGenesisTime > 0 { return b.cachedGenesisTime, nil } - gr, err := beaconRequest[genesisResponse](b, ctx, "/eth/v1/beacon/genesis") if err != nil { return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) } + b.cachedGenesisTime = gr.GenesisTime + return b.cachedGenesisTime, nil +} + +type getSpecResponse struct { + SecondsPerSlot uint64 `json:"SECONDS_PER_SLOT"` +} + +func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { + if b.cachedSecondsPerSlot > 0 { + return b.cachedSecondsPerSlot, nil + } + gr, err := beaconRequest[getSpecResponse](b, ctx, "/eth/v1/config/spec") + if err != nil { + return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) + } + b.cachedSecondsPerSlot = gr.SecondsPerSlot + return b.cachedSecondsPerSlot, nil - return gr.GenesisTime, nil } From fe9fce1c995734b2fcedb4c0d8658a578156a43e Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 18:55:45 -0700 Subject: [PATCH 26/40] Update go-ethereum pin to fix trusted setup --- go-ethereum | 2 +- go.mod | 6 +++--- go.sum | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go-ethereum b/go-ethereum index c4b3aa68a0..eca6e11dad 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit c4b3aa68a05f468e0c30147f9383bfc76d82388f +Subproject commit eca6e11dad2c7f8cd1276e38678afec271323422 diff --git a/go.mod b/go.mod index e38f3209dd..d50090f6c1 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,6 @@ require ( github.com/libp2p/go-libp2p v0.27.8 github.com/multiformats/go-multiaddr v0.12.1 github.com/multiformats/go-multihash v0.2.3 - github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703 github.com/spf13/pflag v1.0.5 @@ -90,7 +89,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect @@ -103,7 +102,7 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -234,6 +233,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect 
github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect diff --git a/go.sum b/go.sum index 872afcafbf..d066f85214 100644 --- a/go.sum +++ b/go.sum @@ -264,8 +264,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHH github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= -github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -338,8 +338,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= -github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= From ac0978f6ad0758be8ae435d1c2fdafb4e68e41a4 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:14:06 -0700 Subject: [PATCH 27/40] Support batch data location for batch hashes --- arbnode/sequencer_inbox.go | 15 +++++++++++++++ arbutil/transaction_data.go | 12 ++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/arbnode/sequencer_inbox.go b/arbnode/sequencer_inbox.go index d0cdebfeff..b743bf0ef9 100644 --- a/arbnode/sequencer_inbox.go +++ b/arbnode/sequencer_inbox.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -33,6 +34,7 @@ const ( batchDataTxInput batchDataLocation = iota batchDataSeparateEvent batchDataNone + 
batchDataBlobHashes ) func init() { @@ -149,6 +151,19 @@ func (m *SequencerInboxBatch) getSequencerData(ctx context.Context, client arbut case batchDataNone: // No data when in a force inclusion batch return nil, nil + case batchDataBlobHashes: + tx, err := arbutil.GetLogTransaction(ctx, client, m.rawLog) + if err != nil { + return nil, err + } + if len(tx.BlobHashes()) == 0 { + return nil, fmt.Errorf("blob batch transaction %v has no blobs", tx.Hash()) + } + data := []byte{arbstate.BlobHashesHeaderFlag} + for _, h := range tx.BlobHashes() { + data = append(data, h[:]...) + } + return data, nil default: return nil, fmt.Errorf("batch has invalid data location %v", m.dataLocation) } diff --git a/arbutil/transaction_data.go b/arbutil/transaction_data.go index 7741af6e9b..8270a628bd 100644 --- a/arbutil/transaction_data.go +++ b/arbutil/transaction_data.go @@ -10,8 +10,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long -func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { +func GetLogTransaction(ctx context.Context, client L1Interface, log types.Log) (*types.Transaction, error) { tx, err := client.TransactionInBlock(ctx, log.BlockHash, log.TxIndex) if err != nil { return nil, err @@ -19,6 +18,15 @@ func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) if tx.Hash() != log.TxHash { return nil, fmt.Errorf("L1 client returned unexpected transaction hash %v when looking up block %v transaction %v with expected hash %v", tx.Hash(), log.BlockHash, log.TxIndex, log.TxHash) } + return tx, nil +} + +// GetLogEmitterTxData requires that the tx's data is at least 4 bytes long +func GetLogEmitterTxData(ctx context.Context, client L1Interface, log types.Log) ([]byte, error) { + tx, err := GetLogTransaction(ctx, client, log) + if err != nil { + return nil, err + } if len(tx.Data()) < 4 { return nil, fmt.Errorf("log emitting transaction %v unexpectedly does not have enough data", tx.Hash()) } From 8ca7cf89c2c375a93c6510adfa09613bea830616 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:17:34 -0700 Subject: [PATCH 28/40] Fix beacon URL concatenation --- arbnode/blob_reader.go | 19 ++++++++++++++----- arbnode/node.go | 5 ++++- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index 02626efff5..cf7c266513 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net/http" + "net/url" "path" "github.com/ethereum/go-ethereum/common" @@ -22,8 +23,8 @@ import ( ) type BlobClient struct { - config BlobClientConfig ec arbutil.L1Interface + beaconUrl *url.URL httpClient *http.Client // The genesis time time and seconds per slot won't change so only request them once. 
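For context on the URL-building change in the next hunk: the earlier code ran path.Join over the whole beacon URL string, and path.Join path-cleans the "//" after the scheme, producing an invalid URL. The fix parses the URL once and joins only its Path component. A small sketch of the difference, using a hypothetical local beacon endpoint and not part of the patch:

    package main

    import (
        "fmt"
        "net/url"
        "path"
    )

    func main() {
        // Broken: path.Join treats the URL as a filesystem-style path and
        // collapses the double slash after the scheme.
        fmt.Println(path.Join("http://localhost:5052", "/eth/v1/beacon/genesis"))
        // Output: http:/localhost:5052/eth/v1/beacon/genesis

        // Fixed approach: parse once, copy the parsed URL, join only the Path part.
        base, err := url.Parse("http://localhost:5052")
        if err != nil {
            panic(err)
        }
        u := *base
        u.Path = path.Join(u.Path, "/eth/v1/beacon/genesis")
        fmt.Println(u.String())
        // Output: http://localhost:5052/eth/v1/beacon/genesis
    }
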
@@ -43,12 +44,16 @@ func BlobClientAddOptions(prefix string, f *pflag.FlagSet) { f.String(prefix+".beacon-chain-url", DefaultBlobClientConfig.BeaconChainUrl, "Beacon Chain url to use for fetching blobs") } -func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) *BlobClient { +func NewBlobClient(config BlobClientConfig, ec arbutil.L1Interface) (*BlobClient, error) { + beaconUrl, err := url.Parse(config.BeaconChainUrl) + if err != nil { + return nil, fmt.Errorf("failed to parse beacon chain URL: %w", err) + } return &BlobClient{ - config: config, ec: ec, + beaconUrl: beaconUrl, httpClient: &http.Client{}, - } + }, nil } type fullResult[T any] struct { @@ -60,7 +65,11 @@ func beaconRequest[T interface{}](b *BlobClient, ctx context.Context, beaconPath var empty T - req, err := http.NewRequestWithContext(ctx, "GET", path.Join(b.config.BeaconChainUrl, beaconPath), http.NoBody) + // not really a deep copy, but copies the Path part we care about + url := *b.beaconUrl + url.Path = path.Join(url.Path, beaconPath) + + req, err := http.NewRequestWithContext(ctx, "GET", url.String(), http.NoBody) if err != nil { return empty, err } diff --git a/arbnode/node.go b/arbnode/node.go index 9f5626bbaf..5990cfdbec 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -517,7 +517,10 @@ func createNodeImpl( var blobReader arbstate.BlobReader if config.BlobClient.BeaconChainUrl != "" { - blobReader = NewBlobClient(config.BlobClient, l1client) + blobReader, err = NewBlobClient(config.BlobClient, l1client) + if err != nil { + return nil, err + } } inboxTracker, err := NewInboxTracker(arbDb, txStreamer, daReader, blobReader) From ef832b23d559e1ff24142d3128a4bfd910e2148f Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:30:29 -0700 Subject: [PATCH 29/40] Unmarshal beacon chain JSON integers as strings --- arbnode/blob_reader.go | 25 +++++++++++++------------ util/jsonapi/uint64_string.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 12 deletions(-) create mode 100644 util/jsonapi/uint64_string.go diff --git a/arbnode/blob_reader.go b/arbnode/blob_reader.go index cf7c266513..1424285832 100644 --- a/arbnode/blob_reader.go +++ b/arbnode/blob_reader.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/util/blobs" + "github.com/offchainlabs/nitro/util/jsonapi" "github.com/offchainlabs/nitro/util/pretty" "github.com/spf13/pflag" @@ -112,14 +113,14 @@ func (b *BlobClient) GetBlobs(ctx context.Context, blockHash common.Hash, versio } type blobResponseItem struct { - BlockRoot string `json:"block_root"` - Index int `json:"index"` - Slot uint64 `json:"slot"` - BlockParentRoot string `json:"block_parent_root"` - ProposerIndex uint64 `json:"proposer_index"` - Blob hexutil.Bytes `json:"blob"` - KzgCommitment hexutil.Bytes `json:"kzg_commitment"` - KzgProof hexutil.Bytes `json:"kzg_proof"` + BlockRoot string `json:"block_root"` + Index jsonapi.Uint64String `json:"index"` + Slot jsonapi.Uint64String `json:"slot"` + BlockParentRoot string `json:"block_parent_root"` + ProposerIndex jsonapi.Uint64String `json:"proposer_index"` + Blob hexutil.Bytes `json:"blob"` + KzgCommitment hexutil.Bytes `json:"kzg_commitment"` + KzgProof hexutil.Bytes `json:"kzg_proof"` } func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, versionedHashes []common.Hash) ([]kzg4844.Blob, error) { @@ -181,7 +182,7 @@ func (b *BlobClient) blobSidecars(ctx context.Context, slot uint64, 
versionedHas } type genesisResponse struct { - GenesisTime uint64 `json:"genesis_time"` + GenesisTime jsonapi.Uint64String `json:"genesis_time"` // don't currently care about other fields, add if needed } @@ -193,12 +194,12 @@ func (b *BlobClient) genesisTime(ctx context.Context) (uint64, error) { if err != nil { return 0, fmt.Errorf("error calling beacon client in genesisTime: %w", err) } - b.cachedGenesisTime = gr.GenesisTime + b.cachedGenesisTime = uint64(gr.GenesisTime) return b.cachedGenesisTime, nil } type getSpecResponse struct { - SecondsPerSlot uint64 `json:"SECONDS_PER_SLOT"` + SecondsPerSlot jsonapi.Uint64String `json:"SECONDS_PER_SLOT"` } func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { @@ -209,7 +210,7 @@ func (b *BlobClient) secondsPerSlot(ctx context.Context) (uint64, error) { if err != nil { return 0, fmt.Errorf("error calling beacon client in secondsPerSlot: %w", err) } - b.cachedSecondsPerSlot = gr.SecondsPerSlot + b.cachedSecondsPerSlot = uint64(gr.SecondsPerSlot) return b.cachedSecondsPerSlot, nil } diff --git a/util/jsonapi/uint64_string.go b/util/jsonapi/uint64_string.go new file mode 100644 index 0000000000..27cbb18526 --- /dev/null +++ b/util/jsonapi/uint64_string.go @@ -0,0 +1,32 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +package jsonapi + +import ( + "fmt" + "strconv" +) + +// Uint64String is a uint64 that JSON marshals and unmarshals as string in decimal +type Uint64String uint64 + +func (u *Uint64String) UnmarshalJSON(b []byte) error { + s := string(b) + if s == "null" { + return nil + } + + // Parse string as uint64, removing quotes + value, err := strconv.ParseUint(s[1:len(s)-1], 10, 64) + if err != nil { + return err + } + + *u = Uint64String(value) + return nil +} + +func (u Uint64String) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("\"%d\"", uint64(u))), nil +} From 13ed4c63a4e8822d93354171e74b43e89bd7646b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:30:41 -0700 Subject: [PATCH 30/40] Fix blob decoding not accepting trailing bytes --- util/blobs/blobs.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 9f6c8d1303..02a22556a7 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -4,6 +4,7 @@ package blobs import ( + "bytes" "crypto/sha256" "github.com/ethereum/go-ethereum/common" @@ -47,7 +48,7 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { } } var outputData []byte - err := rlp.DecodeBytes(rlpData, &outputData) + err := rlp.Decode(bytes.NewReader(rlpData), &outputData) return outputData, err } From b48224390ae90975a7d249fe2cc2fcc266fd09d3 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 19:54:09 -0700 Subject: [PATCH 31/40] Fix arbitrator tests and hopefully CI --- .github/workflows/codeql-analysis.yml | 3 +++ .../prover/test-cases/rust/data/msg0.bin | Bin 1 -> 2 bytes .../prover/test-cases/rust/data/msg1.bin | Bin 32 -> 32 bytes .../prover/test-cases/rust/src/bin/host-io.rs | 19 +++++++++++------- contracts | 2 +- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 8fb9d80c21..8b7ebd0e15 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -75,6 +75,9 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable + - name: Install Foundry + uses: 
foundry-rs/foundry-toolchain@v1 + - name: Cache Rust Build Products uses: actions/cache@v3 with: diff --git a/arbitrator/prover/test-cases/rust/data/msg0.bin b/arbitrator/prover/test-cases/rust/data/msg0.bin index 5cd813e5c5f312673ce9cf39fb832fb2d55116cc..7eb0b7fdf9dac856acf46ccada40589a72c84592 100644 GIT binary patch literal 2 JcmZQr1pol70IL81 literal 1 IcmZ1l00651s{jB1 diff --git a/arbitrator/prover/test-cases/rust/data/msg1.bin b/arbitrator/prover/test-cases/rust/data/msg1.bin index 2ea3dec3e1641a99198fd86a817f906db6ddaf76..fefa1cc823e0ec463b7923972b03901b52808fc1 100644 GIT binary patch delta 6 NcmY#TU}Tse4*&wG0Pz3- literal 32 ncmZQ%VrF4wW9Q)H;^yJy;};MV5*85^6PJ*bl9rK`lUD!$7k~m0 diff --git a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs index d16f60ff50..679ee14486 100644 --- a/arbitrator/prover/test-cases/rust/src/bin/host-io.rs +++ b/arbitrator/prover/test-cases/rust/src/bin/host-io.rs @@ -29,24 +29,29 @@ fn main() { let mut bytebuffer = Bytes32([0x0; 32]); // in delayed inbox - we're skipping the "kind" byte println!("delayed inbox message 0"); + let mut expected_buffer = bytebuffer.0; let len = wavm_read_delayed_inbox_message(0, bytebuffer.0.as_mut_ptr(), DELAYED_HEADER_LEN); - assert_eq!(len, 2); - assert_eq!(bytebuffer.0[1], 0xaa); + assert_eq!(len, 3); + expected_buffer[2] = 0xaa; + assert_eq!(bytebuffer.0, expected_buffer); println!("delayed inbox message 1"); let len = wavm_read_delayed_inbox_message(1, bytebuffer.0.as_mut_ptr(), DELAYED_HEADER_LEN); assert_eq!(len, 32); - for j in 1..31 { - assert_eq!(bytebuffer.0[j], (j as u8)); + for j in 1..32 { + assert_eq!(bytebuffer.0[j], (j as u8) - 1); } println!("inbox message 0"); + expected_buffer = bytebuffer.0; let len = wavm_read_inbox_message(0, bytebuffer.0.as_mut_ptr(), INBOX_HEADER_LEN); - assert_eq!(len, 1); - assert_eq!(bytebuffer.0[0], 0xaa); + expected_buffer[0] = 0; + expected_buffer[1] = 0xaa; + assert_eq!(len, 2); + assert_eq!(bytebuffer.0, expected_buffer); println!("inbox message 1"); let len = wavm_read_inbox_message(1, bytebuffer.0.as_mut_ptr(), INBOX_HEADER_LEN); assert_eq!(len, 32); for j in 0..32 { - assert_eq!(bytebuffer.0[j], (j as u8) + 1); + assert_eq!(bytebuffer.0[j], (j as u8)); } let keccak_hash = hex!("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"); diff --git a/contracts b/contracts index e253b8b1b5..cd5093d45e 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit e253b8b1b5865f135ac63ea3d3cea1bfe8ef2ad7 +Subproject commit cd5093d45ef0353fc5b2718ead70bd7f36e1a92c From ae7a199bfd6c00b7438f94d82f93dfa97d633c8f Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:22:21 -0700 Subject: [PATCH 32/40] Use KZG preimage support in replay binary --- cmd/replay/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/replay/main.go b/cmd/replay/main.go index b634a2d5bb..dd8a0fd1f7 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -130,9 +130,9 @@ func (r *BlobPreimageReader) GetBlobs( var blobs []kzg4844.Blob for _, h := range versionedHashes { var blob kzg4844.Blob - var preimage []byte - if true { - panic("TODO: fill in preimage with wavmio.ResolvePreimage(h, wavmio.PreimageTypeEthVersionedHash) once KZG proof support is merged into this branch") + preimage, err := wavmio.ResolveTypedPreimage(arbutil.EthVersionedHashPreimageType, h) + if err != nil { + return nil, err } if len(preimage) != len(blob) { return nil, fmt.Errorf("for blob %v got back preimage of 
length %v but expected blob length %v", h, len(preimage), len(blob)) From 3de7b73eb70c5711acce731981d36cb26c2dffd2 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:22:48 -0700 Subject: [PATCH 33/40] Fix arbitrator handling of non-keccak preimages --- arbitrator/jit/src/wavmio.rs | 4 ++-- arbitrator/prover/src/lib.rs | 18 +++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/arbitrator/jit/src/wavmio.rs b/arbitrator/jit/src/wavmio.rs index a398cb22f5..dfc7f21779 100644 --- a/arbitrator/jit/src/wavmio.rs +++ b/arbitrator/jit/src/wavmio.rs @@ -193,8 +193,8 @@ pub fn resolve_preimage_impl( }; let offset = match u32::try_from(offset) { - Ok(offset) => offset as usize, - Err(_) => error!("bad offset {offset} in {name}"), + Ok(offset) if offset % 32 == 0 => offset as usize, + _ => error!("bad offset {offset} in {name}"), }; let len = std::cmp::min(32, preimage.len().saturating_sub(offset)); diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 8285c011df..c7610ab31f 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -19,7 +19,6 @@ use crate::machine::{argument_data_to_inbox, Machine}; use arbutil::PreimageType; use eyre::Result; use machine::{get_empty_preimage_resolver, GlobalState, MachineStatus, PreimageResolver}; -use sha3::{Digest, Keccak256}; use static_assertions::const_assert_eq; use std::{ ffi::CStr, @@ -303,13 +302,18 @@ pub unsafe extern "C" fn arbitrator_set_preimage_resolver( return None; } let data = CBytes::from_raw_parts(res.ptr, res.len as usize); - let have_hash = Keccak256::digest(&data); - if have_hash.as_slice() != *hash { - panic!( - "Resolved incorrect data for hash {}: got {}", + #[cfg(debug_assertions)] + match crate::utils::hash_preimage(&data, ty) { + Ok(have_hash) if have_hash.as_slice() == *hash => {} + Ok(got_hash) => panic!( + "Resolved incorrect data for hash {} (rehashed to {})", hash, - hex::encode(data), - ); + Bytes32(got_hash), + ), + Err(err) => panic!( + "Failed to hash preimage from resolver (expecting hash {}): {}", + hash, err, + ), } Some(data) }, From e82ac7272443055041ef286af187fb225e44d8ea Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:23:04 -0700 Subject: [PATCH 34/40] Provide blob preimages during validation --- arbnode/inbox_reader.go | 15 ++++--- arbnode/node.go | 1 + arbnode/transaction_streamer.go | 2 +- execution/gethexec/block_recorder.go | 7 +-- execution/gethexec/executionengine.go | 5 ++- execution/interface.go | 2 +- staker/block_validator.go | 30 +++++++------ staker/stateless_block_validator.go | 56 ++++++++++++++++-------- system_tests/full_challenge_impl_test.go | 4 +- system_tests/staker_test.go | 2 + validator/validation_entry.go | 5 ++- 11 files changed, 81 insertions(+), 48 deletions(-) diff --git a/arbnode/inbox_reader.go b/arbnode/inbox_reader.go index 9c830e3c89..5fca3c7eea 100644 --- a/arbnode/inbox_reader.go +++ b/arbnode/inbox_reader.go @@ -14,6 +14,7 @@ import ( "sync/atomic" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" flag "github.com/spf13/pflag" @@ -401,7 +402,8 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } log.Warn("missing mentioned batch in L1 message lookup", "batch", batchNum) } - return r.GetSequencerMessageBytes(ctx, batchNum) + data, _, err := r.GetSequencerMessageBytes(ctx, batchNum) + return data, err }) if err != nil { return err @@ -570,24 +572,25 @@ func (r *InboxReader) getNextBlockToRead() (*big.Int, error) { 
return msgBlock, nil } -func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, error) { +func (r *InboxReader) GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) { metadata, err := r.tracker.GetBatchMetadata(seqNum) if err != nil { - return nil, err + return nil, common.Hash{}, err } blockNum := arbmath.UintToBig(metadata.ParentChainBlock) seqBatches, err := r.sequencerInbox.LookupBatchesInRange(ctx, blockNum, blockNum) if err != nil { - return nil, err + return nil, common.Hash{}, err } var seenBatches []uint64 for _, batch := range seqBatches { if batch.SequenceNumber == seqNum { - return batch.Serialize(ctx, r.client) + data, err := batch.Serialize(ctx, r.client) + return data, batch.BlockHash, err } seenBatches = append(seenBatches, batch.SequenceNumber) } - return nil, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) + return nil, common.Hash{}, fmt.Errorf("sequencer batch %v not found in L1 block %v (found batches %v)", seqNum, metadata.ParentChainBlock, seenBatches) } func (r *InboxReader) GetLastReadBlockAndBatchCount() (uint64, uint64) { diff --git a/arbnode/node.go b/arbnode/node.go index 5990cfdbec..de9745f2a8 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -542,6 +542,7 @@ func createNodeImpl( exec, rawdb.NewTable(arbDb, storage.BlockValidatorPrefix), daReader, + blobReader, func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator }, stack, ) diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 24ef2a7cc4..f96d51ce0e 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -820,7 +820,7 @@ func (s *TransactionStreamer) addMessagesAndEndBatchImpl(messageStartPos arbutil return nil } -func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, error) { +func (s *TransactionStreamer) FetchBatch(batchNum uint64) ([]byte, common.Hash, error) { return s.inboxReader.GetSequencerMessageBytes(context.TODO(), batchNum) } diff --git a/execution/gethexec/block_recorder.go b/execution/gethexec/block_recorder.go index a0f6d837e4..d7e702f3c1 100644 --- a/execution/gethexec/block_recorder.go +++ b/execution/gethexec/block_recorder.go @@ -123,13 +123,14 @@ func (r *BlockRecorder) RecordBlockCreation( var readBatchInfo []validator.BatchInfo if msg != nil { batchFetcher := func(batchNum uint64) ([]byte, error) { - data, err := r.execEngine.streamer.FetchBatch(batchNum) + data, blockHash, err := r.execEngine.streamer.FetchBatch(batchNum) if err != nil { return nil, err } readBatchInfo = append(readBatchInfo, validator.BatchInfo{ - Number: batchNum, - Data: data, + Number: batchNum, + BlockHash: blockHash, + Data: data, }) return data, nil } diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 58e91a197e..20e9ca6f3b 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -449,7 +449,10 @@ func (s *ExecutionEngine) createBlockFromNextMessage(msg *arbostypes.MessageWith statedb, s.bc, s.bc.Config(), - s.streamer.FetchBatch, + func(batchNum uint64) ([]byte, error) { + data, _, err := s.streamer.FetchBatch(batchNum) + return data, err + }, ) return block, statedb, receipts, err diff --git a/execution/interface.go b/execution/interface.go index ef9409b9c1..5f7c01719e 100644 --- a/execution/interface.go +++ b/execution/interface.go @@ -73,7 +73,7 @@ type FullExecutionClient 
interface { // not implemented in execution, used as input type BatchFetcher interface { - FetchBatch(batchNum uint64) ([]byte, error) + FetchBatch(batchNum uint64) ([]byte, common.Hash, error) } type TransactionStreamer interface { diff --git a/staker/block_validator.go b/staker/block_validator.go index 352335a5db..fad5e9c39a 100644 --- a/staker/block_validator.go +++ b/staker/block_validator.go @@ -45,11 +45,12 @@ type BlockValidator struct { chainCaughtUp bool // can only be accessed from creation thread or if holding reorg-write - nextCreateBatch []byte - nextCreateBatchMsgCount arbutil.MessageIndex - nextCreateBatchReread bool - nextCreateStartGS validator.GoGlobalState - nextCreatePrevDelayed uint64 + nextCreateBatch []byte + nextCreateBatchBlockHash common.Hash + nextCreateBatchMsgCount arbutil.MessageIndex + nextCreateBatchReread bool + nextCreateStartGS validator.GoGlobalState + nextCreatePrevDelayed uint64 // can only be accessed from from validation thread or if holding reorg-write lastValidGS validator.GoGlobalState @@ -455,23 +456,23 @@ func (v *BlockValidator) SetCurrentWasmModuleRoot(hash common.Hash) error { ) } -func (v *BlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, arbutil.MessageIndex, error) { +func (v *BlockValidator) readBatch(ctx context.Context, batchNum uint64) (bool, []byte, common.Hash, arbutil.MessageIndex, error) { batchCount, err := v.inboxTracker.GetBatchCount() if err != nil { - return false, nil, 0, err + return false, nil, common.Hash{}, 0, err } if batchCount <= batchNum { - return false, nil, 0, nil + return false, nil, common.Hash{}, 0, nil } batchMsgCount, err := v.inboxTracker.GetBatchMessageCount(batchNum) if err != nil { - return false, nil, 0, err + return false, nil, common.Hash{}, 0, err } - batch, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum) + batch, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, batchNum) if err != nil { - return false, nil, 0, err + return false, nil, common.Hash{}, 0, err } - return true, batch, batchMsgCount, nil + return true, batch, batchBlockHash, batchMsgCount, nil } func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, error) { @@ -500,11 +501,12 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e } if v.nextCreateStartGS.PosInBatch == 0 || v.nextCreateBatchReread { // new batch - found, batch, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch) + found, batch, batchBlockHash, count, err := v.readBatch(ctx, v.nextCreateStartGS.Batch) if !found { return false, err } v.nextCreateBatch = batch + v.nextCreateBatchBlockHash = batchBlockHash v.nextCreateBatchMsgCount = count validatorMsgCountCurrentBatch.Update(int64(count)) v.nextCreateBatchReread = false @@ -522,7 +524,7 @@ func (v *BlockValidator) createNextValidationEntry(ctx context.Context) (bool, e } else { return false, fmt.Errorf("illegal batch msg count %d pos %d batch %d", v.nextCreateBatchMsgCount, pos, endGS.Batch) } - entry, err := newValidationEntry(pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreatePrevDelayed) + entry, err := newValidationEntry(pos, v.nextCreateStartGS, endGS, msg, v.nextCreateBatch, v.nextCreateBatchBlockHash, v.nextCreatePrevDelayed) if err != nil { return false, err } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index d35304be27..13b16e42cd 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -39,6 +39,7 @@ 
type StatelessBlockValidator struct { streamer TransactionStreamerInterface db ethdb.Database daService arbstate.DataAvailabilityReader + blobReader arbstate.BlobReader moduleMutex sync.Mutex currentWasmModuleRoot common.Hash @@ -67,7 +68,7 @@ type TransactionStreamerInterface interface { } type InboxReaderInterface interface { - GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, error) + GetSequencerMessageBytes(ctx context.Context, seqNum uint64) ([]byte, common.Hash, error) } type GlobalStatePosition struct { @@ -186,11 +187,13 @@ func newValidationEntry( end validator.GoGlobalState, msg *arbostypes.MessageWithMetadata, batch []byte, + batchBlockHash common.Hash, prevDelayed uint64, ) (*validationEntry, error) { batchInfo := validator.BatchInfo{ - Number: start.Batch, - Data: batch, + Number: start.Batch, + BlockHash: batchBlockHash, + Data: batch, } hasDelayed := false var delayedNum uint64 @@ -219,6 +222,7 @@ func NewStatelessBlockValidator( recorder execution.ExecutionRecorder, arbdb ethdb.Database, das arbstate.DataAvailabilityReader, + blobReader arbstate.BlobReader, config func() *BlockValidatorConfig, stack *node.Node, ) (*StatelessBlockValidator, error) { @@ -235,6 +239,7 @@ func NewStatelessBlockValidator( streamer: streamer, db: arbdb, daService: das, + blobReader: blobReader, } return validator, nil } @@ -285,20 +290,35 @@ func (v *StatelessBlockValidator) ValidationEntryRecord(ctx context.Context, e * continue } if arbstate.IsBlobHashesHeaderByte(batch.Data[40]) { - // TODO: fetch blob preimages - panic("TODO: fetch blob preimages") - } - if !arbstate.IsDASMessageHeaderByte(batch.Data[40]) { - continue - } - if v.daService == nil { - log.Warn("No DAS configured, but sequencer message found with DAS header") - } else { - _, err := arbstate.RecoverPayloadFromDasBatch( - ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, - ) + payload := batch.Data[41:] + if len(payload)%len(common.Hash{}) != 0 { + return fmt.Errorf("blob batch data is not a list of hashes as expected") + } + versionedHashes := make([]common.Hash, len(payload)/len(common.Hash{})) + for i := 0; i*32 < len(payload); i += 1 { + copy(versionedHashes[i][:], payload[i*32:(i+1)*32]) + } + blobs, err := v.blobReader.GetBlobs(ctx, batch.BlockHash, versionedHashes) if err != nil { - return err + return fmt.Errorf("failed to get blobs: %w", err) + } + if e.Preimages[arbutil.EthVersionedHashPreimageType] == nil { + e.Preimages[arbutil.EthVersionedHashPreimageType] = make(map[common.Hash][]byte) + } + for i, blob := range blobs { + e.Preimages[arbutil.EthVersionedHashPreimageType][versionedHashes[i]] = blob[:] + } + } + if arbstate.IsDASMessageHeaderByte(batch.Data[40]) { + if v.daService == nil { + log.Warn("No DAS configured, but sequencer message found with DAS header") + } else { + _, err := arbstate.RecoverPayloadFromDasBatch( + ctx, batch.Number, batch.Data, v.daService, e.Preimages, arbstate.KeysetValidate, + ) + if err != nil { + return err + } } } } @@ -363,11 +383,11 @@ func (v *StatelessBlockValidator) CreateReadyValidationEntry(ctx context.Context } start := buildGlobalState(*prevResult, startPos) end := buildGlobalState(*result, endPos) - seqMsg, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber) + seqMsg, batchBlockHash, err := v.inboxReader.GetSequencerMessageBytes(ctx, startPos.BatchNumber) if err != nil { return nil, err } - entry, err := newValidationEntry(pos, start, end, msg, seqMsg, prevDelayed) + entry, err := 
newValidationEntry(pos, start, end, msg, seqMsg, batchBlockHash, prevDelayed) if err != nil { return nil, err } diff --git a/system_tests/full_challenge_impl_test.go b/system_tests/full_challenge_impl_test.go index 0fa483b6ea..b8f891e3e7 100644 --- a/system_tests/full_challenge_impl_test.go +++ b/system_tests/full_challenge_impl_test.go @@ -385,7 +385,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall confirmLatestBlock(ctx, t, l1Info, l1Backend) - asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } @@ -402,7 +402,7 @@ func RunChallengeTest(t *testing.T, asserterIsCorrect bool, useStubs bool, chall if err != nil { Fatal(t, err) } - challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, nil, nil, StaticFetcherFrom(t, &conf.BlockValidator), valStack) if err != nil { Fatal(t, err) } diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go index 6e3ffd6125..62e89ff782 100644 --- a/system_tests/staker_test.go +++ b/system_tests/staker_test.go @@ -208,6 +208,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) execNodeA, l2nodeA.ArbDB, nil, + nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) @@ -260,6 +261,7 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool) execNodeB, l2nodeB.ArbDB, nil, + nil, StaticFetcherFrom(t, &blockValidatorConfig), valStack, ) diff --git a/validator/validation_entry.go b/validator/validation_entry.go index fed1940f1f..8bb021335e 100644 --- a/validator/validation_entry.go +++ b/validator/validation_entry.go @@ -6,8 +6,9 @@ import ( ) type BatchInfo struct { - Number uint64 - Data []byte + Number uint64 + BlockHash common.Hash + Data []byte } type ValidationInput struct { From 59a8935a9bb651ce43370e441db2cae702a2f004 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:31:50 -0700 Subject: [PATCH 35/40] Fix arbitrator CI --- .github/workflows/arbitrator-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index ba3cf016f5..54a948e04a 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -161,4 +161,5 @@ jobs: cd contracts yarn install yarn build + yarn build:forge:yul yarn hardhat --network localhost test test/prover/*.ts From f646430e65d44936e984ec6e4d8cb47044801ba0 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 20:43:42 -0700 Subject: [PATCH 36/40] Fix Go test --- system_tests/meaningless_reorg_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system_tests/meaningless_reorg_test.go b/system_tests/meaningless_reorg_test.go index f09f68041a..e1715dc635 100644 --- 
a/system_tests/meaningless_reorg_test.go +++ b/system_tests/meaningless_reorg_test.go @@ -95,7 +95,7 @@ func TestMeaninglessBatchReorg(t *testing.T) { time.Sleep(10 * time.Millisecond) } - _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1) + _, _, err = builder.L2.ConsensusNode.InboxReader.GetSequencerMessageBytes(ctx, 1) Require(t, err) l2Header, err := builder.L2.Client.HeaderByNumber(ctx, l2Receipt.BlockNumber) From d2b7c9db14667046d08557d1503cf38dad267117 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 21:26:43 -0700 Subject: [PATCH 37/40] Strengthen Uint64String parsing --- util/jsonapi/uint64_string.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/util/jsonapi/uint64_string.go b/util/jsonapi/uint64_string.go index 27cbb18526..980b97a949 100644 --- a/util/jsonapi/uint64_string.go +++ b/util/jsonapi/uint64_string.go @@ -4,6 +4,7 @@ package jsonapi import ( + "encoding/json" "fmt" "strconv" ) @@ -12,13 +13,18 @@ import ( type Uint64String uint64 func (u *Uint64String) UnmarshalJSON(b []byte) error { - s := string(b) - if s == "null" { + jsonString := string(b) + if jsonString == "null" { return nil } - // Parse string as uint64, removing quotes - value, err := strconv.ParseUint(s[1:len(s)-1], 10, 64) + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + value, err := strconv.ParseUint(s, 10, 64) if err != nil { return err } From 277f241004def543f547bdb65aa4688216994bbc Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 23 Jan 2024 23:40:07 -0700 Subject: [PATCH 38/40] Pack more bits into blobs --- util/blobs/blobs.go | 74 ++++++++++++++++++++++++++++++++-------- util/blobs/blobs_test.go | 52 ++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 util/blobs/blobs_test.go diff --git a/util/blobs/blobs.go b/util/blobs/blobs.go index 55df57f9d1..2852f2b29f 100644 --- a/util/blobs/blobs.go +++ b/util/blobs/blobs.go @@ -14,6 +14,44 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +func fillBlobBytes(blob []byte, data []byte) []byte { + for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ { + startIdx := fieldElement*32 + 1 + copy(blob[startIdx:startIdx+31], data) + if len(data) <= 31 { + return nil + } + data = data[31:] + } + return data +} + +// The number of bits in a BLS scalar that aren't part of a whole byte. +const spareBlobBits = 6 // = math.floor(math.log2(BLS_MODULUS)) % 8 + +func fillBlobBits(blob []byte, data []byte) ([]byte, error) { + var acc uint16 + accBits := 0 + for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ { + if accBits < spareBlobBits && len(data) > 0 { + acc |= uint16(data[0]) << accBits + accBits += 8 + data = data[1:] + } + blob[fieldElement*32] = uint8(acc & ((1 << spareBlobBits) - 1)) + accBits -= spareBlobBits + if accBits < 0 { + // We're out of data + break + } + acc >>= spareBlobBits + } + if accBits > 0 { + return nil, fmt.Errorf("somehow ended up with %v spare accBits", accBits) + } + return data, nil +} + // EncodeBlobs takes in raw bytes data to convert into blobs used for KZG commitment EIP-4844 // transactions on Ethereum. 
func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) { @@ -21,21 +59,15 @@ func EncodeBlobs(data []byte) ([]kzg4844.Blob, error) { if err != nil { return nil, err } - blobs := []kzg4844.Blob{{}} - blobIndex := 0 - fieldIndex := -1 - for i := 0; i < len(data); i += 31 { - fieldIndex++ - if fieldIndex == params.BlobTxFieldElementsPerBlob { - blobs = append(blobs, kzg4844.Blob{}) - blobIndex++ - fieldIndex = 0 - } - max := i + 31 - if max > len(data) { - max = len(data) + var blobs []kzg4844.Blob + for len(data) > 0 { + var b kzg4844.Blob + data = fillBlobBytes(b[:], data) + data, err = fillBlobBits(b[:], data) + if err != nil { + return nil, err } - copy(blobs[blobIndex][fieldIndex*32+1:], data[i:max]) + blobs = append(blobs, b) } return blobs, nil } @@ -47,6 +79,20 @@ func DecodeBlobs(blobs []kzg4844.Blob) ([]byte, error) { for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ { rlpData = append(rlpData, blob[fieldIndex*32+1:(fieldIndex+1)*32]...) } + var acc uint16 + accBits := 0 + for fieldIndex := 0; fieldIndex < params.BlobTxFieldElementsPerBlob; fieldIndex++ { + acc |= uint16(blob[fieldIndex*32]) << accBits + accBits += spareBlobBits + if accBits >= 8 { + rlpData = append(rlpData, uint8(acc)) + acc >>= 8 + accBits -= 8 + } + } + if accBits != 0 { + return nil, fmt.Errorf("somehow ended up with %v spare accBits", accBits) + } } var outputData []byte err := rlp.Decode(bytes.NewReader(rlpData), &outputData) diff --git a/util/blobs/blobs_test.go b/util/blobs/blobs_test.go new file mode 100644 index 0000000000..753b50a489 --- /dev/null +++ b/util/blobs/blobs_test.go @@ -0,0 +1,52 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package blobs + +import ( + "bytes" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/params" +) + +const bytesEncodedPerBlob = 254 * 4096 / 8 + +var blsModulus, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10) + +func TestBlobEncoding(t *testing.T) { + r := rand.New(rand.NewSource(1)) +outer: + for i := 0; i < 40; i++ { + data := make([]byte, r.Int()%bytesEncodedPerBlob*3) + _, err := r.Read(data) + if err != nil { + t.Fatalf("failed to generate random bytes: %v", err) + } + enc, err := EncodeBlobs(data) + if err != nil { + t.Errorf("failed to encode blobs for length %v: %v", len(data), err) + continue + } + for _, b := range enc { + for fieldElement := 0; fieldElement < params.BlobTxFieldElementsPerBlob; fieldElement++ { + bigInt := new(big.Int).SetBytes(b[fieldElement*32 : (fieldElement+1)*32]) + if bigInt.Cmp(blsModulus) >= 0 { + t.Errorf("for length %v blob %v has field element %v value %v >= modulus %v", len(data), b, fieldElement, bigInt, blsModulus) + continue outer + } + } + } + dec, err := DecodeBlobs(enc) + if err != nil { + t.Errorf("failed to decode blobs for length %v: %v", len(data), err) + continue + } + if !bytes.Equal(data, dec) { + t.Errorf("got different decoding for length %v", len(data)) + continue + } + } +} From 03b2fe8a801d5429b759603a1fd12d9f8d4186f6 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 24 Jan 2024 08:13:20 -0700 Subject: [PATCH 39/40] Add separate max batch size for 4844 --- arbnode/batch_poster.go | 66 ++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 24 deletions(-) diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 4a07d36521..65ef76e0a0 100644 --- a/arbnode/batch_poster.go +++ 
b/arbnode/batch_poster.go @@ -118,6 +118,8 @@ type BatchPosterConfig struct { DisableDasFallbackStoreDataOnChain bool `koanf:"disable-das-fallback-store-data-on-chain" reload:"hot"` // Max batch size. MaxSize int `koanf:"max-size" reload:"hot"` + // Maximum 4844 blob enabled batch size. + Max4844BatchSize int `koanf:"max-4844-batch-size" reload:"hot"` // Max batch post delay. MaxDelay time.Duration `koanf:"max-delay" reload:"hot"` // Wait for max BatchPost delay. @@ -174,6 +176,7 @@ func BatchPosterConfigAddOptions(prefix string, f *pflag.FlagSet) { f.Bool(prefix+".enable", DefaultBatchPosterConfig.Enable, "enable posting batches to l1") f.Bool(prefix+".disable-das-fallback-store-data-on-chain", DefaultBatchPosterConfig.DisableDasFallbackStoreDataOnChain, "If unable to batch to DAS, disable fallback storing data on chain") f.Int(prefix+".max-size", DefaultBatchPosterConfig.MaxSize, "maximum batch size") + f.Int(prefix+".max-4844-batch-size", DefaultBatchPosterConfig.Max4844BatchSize, "maximum 4844 blob enabled batch size") f.Duration(prefix+".max-delay", DefaultBatchPosterConfig.MaxDelay, "maximum batch posting delay") f.Bool(prefix+".wait-for-max-delay", DefaultBatchPosterConfig.WaitForMaxDelay, "wait for the max batch delay, even if the batch is full") f.Duration(prefix+".poll-interval", DefaultBatchPosterConfig.PollInterval, "how long to wait after no batches are ready to be posted before checking again") @@ -197,7 +200,9 @@ var DefaultBatchPosterConfig = BatchPosterConfig{ Enable: false, DisableDasFallbackStoreDataOnChain: false, // This default is overridden for L3 chains in applyChainParameters in cmd/nitro/nitro.go - MaxSize: 100000, + MaxSize: 100000, + // TODO: is 1000 bytes an appropriate margin for error vs blob space efficiency? + Max4844BatchSize: (254 * params.BlobTxFieldElementsPerBlob / 8 * (params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob)) - 1000, PollInterval: time.Second * 10, ErrorDelay: time.Second * 10, MaxDelay: time.Hour, @@ -227,6 +232,7 @@ var DefaultBatchPosterL1WalletConfig = genericconf.WalletConfig{ var TestBatchPosterConfig = BatchPosterConfig{ Enable: true, MaxSize: 100000, + Max4844BatchSize: DefaultBatchPosterConfig.Max4844BatchSize, PollInterval: time.Millisecond * 10, ErrorDelay: time.Millisecond * 10, MaxDelay: 0, @@ -552,13 +558,20 @@ type buildingBatch struct { startMsgCount arbutil.MessageIndex msgCount arbutil.MessageIndex haveUsefulMessage bool + use4844 bool } -func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64) *batchSegments { - compressedBuffer := bytes.NewBuffer(make([]byte, 0, config.MaxSize*2)) - if config.MaxSize <= 40 { - panic("MaxBatchSize too small") +func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog uint64, use4844 bool) *batchSegments { + maxSize := config.MaxSize + if use4844 { + maxSize = config.Max4844BatchSize + } else { + if maxSize <= 40 { + panic("Maximum batch size too small") + } + maxSize -= 40 } + compressedBuffer := bytes.NewBuffer(make([]byte, 0, maxSize*2)) compressionLevel := config.CompressionLevel recompressionLevel := config.CompressionLevel if backlog > 20 { @@ -582,7 +595,7 @@ func newBatchSegments(firstDelayed uint64, config *BatchPosterConfig, backlog ui return &batchSegments{ compressedBuffer: compressedBuffer, compressedWriter: brotli.NewWriterLevel(compressedBuffer, compressionLevel), - sizeLimit: config.MaxSize - 40, // TODO + sizeLimit: maxSize, recompressionLevel: recompressionLevel, rawSegments: make([][]byte, 0, 128), delayedMsg: 
firstDelayed, @@ -936,10 +949,29 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } if b.building == nil || b.building.startMsgCount != batchPosition.MessageCount { + latestHeader, err := b.l1Reader.LastHeader(ctx) + if err != nil { + return false, err + } + var use4844 bool + config := b.config() + if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.ForcePost4844Blobs { + use4844 = true + } else { + blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) + blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) + blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) + + calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) + use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) + } + } b.building = &buildingBatch{ - segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate()), + segments: newBatchSegments(batchPosition.DelayedMessageCount, b.config(), b.GetBacklogEstimate(), use4844), msgCount: batchPosition.MessageCount, startMsgCount: batchPosition.MessageCount, + use4844: use4844, } } msgCount, err := b.streamer.GetMessageCount() @@ -1115,26 +1147,12 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } } - latestHeader, err := b.l1Reader.LastHeader(ctx) + data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, b.building.use4844) if err != nil { return false, err } - var use4844 bool - if config.Post4844Blobs && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { - if config.ForcePost4844Blobs { - use4844 = true - } else { - blobFeePerByte := eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*latestHeader.ExcessBlobGas, *latestHeader.BlobGasUsed)) - blobFeePerByte.Mul(blobFeePerByte, blobTxBlobGasPerBlob) - blobFeePerByte.Div(blobFeePerByte, usableBytesInBlob) - - calldataFeePerByte := arbmath.BigMulByUint(latestHeader.BaseFee, 16) - use4844 = arbmath.BigLessThan(blobFeePerByte, calldataFeePerByte) - } - } - data, kzgBlobs, err := b.encodeAddBatch(new(big.Int).SetUint64(batchPosition.NextSeqNum), batchPosition.MessageCount, b.building.msgCount, sequencerMsg, b.building.segments.delayedMsg, use4844) - if err != nil { - return false, err + if len(kzgBlobs)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock { + return false, fmt.Errorf("produced %v blobs for batch but a block can only hold %v", len(kzgBlobs), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) } accessList := b.accessList(int(batchPosition.NextSeqNum), int(b.building.segments.delayedMsg)) // On restart, we may be trying to estimate gas for a batch whose successor has From 1ba8e075fa8b4ca0a096c1a44e72c8ee6b9fd229 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sat, 27 Jan 2024 23:06:59 -0700 Subject: [PATCH 40/40] Move contracts back to previous pin --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index a8e7709bfc..cd5093d45e 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit a8e7709bfc918f9b8e2888d47f2fd8454779fd11 +Subproject commit cd5093d45ef0353fc5b2718ead70bd7f36e1a92c