From cd13bd91cd8caa6b8c62fff904c0ce84e3fa455e Mon Sep 17 00:00:00 2001 From: Harsh Vardhan Roy <42067944+royvardhan@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:42:44 +0530 Subject: [PATCH 01/70] feat: unify ReceiptWithBloom from Alloy (#13088) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 2 + crates/ethereum/consensus/src/validation.rs | 5 +- crates/net/eth-wire-types/src/receipts.rs | 10 +- crates/net/network/src/message.rs | 2 +- crates/optimism/consensus/Cargo.toml | 1 + crates/optimism/consensus/src/proof.rs | 29 +- crates/optimism/consensus/src/validation.rs | 3 +- crates/primitives/src/lib.rs | 6 +- crates/primitives/src/proofs.rs | 23 +- crates/primitives/src/receipt.rs | 549 +++++++------------- crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- crates/trie/sparse/src/state.rs | 1 - crates/trie/trie/Cargo.toml | 2 + crates/trie/trie/benches/trie_root.rs | 22 +- 14 files changed, 236 insertions(+), 421 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd893495bc20..fb3910d40e57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8351,6 +8351,7 @@ name = "reth-optimism-consensus" version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-trie", "reth-chainspec", @@ -9455,6 +9456,7 @@ name = "reth-trie" version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-trie", diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index f990ecc57d82..c339c8d25c6f 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,3 +1,4 @@ +use alloy_consensus::{proofs::calculate_receipt_root, TxReceipt}; use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; @@ -62,10 +63,10 @@ fn verify_receipts( ) -> Result<(), ConsensusError> { // Calculate receipts root. 
let receipts_with_bloom = receipts.iter().map(Receipt::with_bloom_ref).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root_ref(&receipts_with_bloom); + let receipts_root = calculate_receipt_root(&receipts_with_bloom); // Calculate header logs bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom()); compare_receipts_root_and_logs_bloom( receipts_root, diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index ca5e85a146f8..2bad4287f2e1 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::ReceiptWithBloom; +use reth_primitives::{Receipt, ReceiptWithBloom}; /// A request for transaction receipts from the given block hashes. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -23,7 +23,7 @@ pub struct GetReceipts( #[add_arbitrary_tests(rlp)] pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request. 
- pub Vec>, + pub Vec>>, ); #[cfg(test)] @@ -37,7 +37,7 @@ mod tests { fn roundtrip_eip1559() { let receipts = Receipts(vec![vec![ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Eip1559, ..Default::default() }, - bloom: Default::default(), + logs_bloom: Default::default(), }]]); let mut out = vec![]; @@ -108,7 +108,7 @@ mod tests { success: false, ..Default::default() }, - bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, ]]), }; @@ -145,7 +145,7 @@ mod tests { success: false, ..Default::default() }, - bloom: 
hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, ], ]), diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index e88ccb54c369..ff5093b67328 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -150,7 +150,7 @@ pub enum PeerResponseResult { /// Represents a result containing node data or an error. NodeData(RequestResult>), /// Represents a result containing receipts or an error. 
- Receipts(RequestResult>>), + Receipts(RequestResult>>>), } // === impl PeerResponseResult === diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index faece6eacf87..4f4868a454dc 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -26,6 +26,7 @@ reth-optimism-chainspec.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde"] } # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-trie.workspace = true diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index 18e64a467ff1..df0669568b3d 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,14 +1,15 @@ //! Helper function for Receipt root calculation for Optimism hardforks. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; use reth_optimism_forks::OpHardfork; -use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use reth_primitives::{Receipt, ReceiptWithBloom}; /// Calculates the receipt root for a header. pub(crate) fn calculate_receipt_root_optimism( - receipts: &[ReceiptWithBloom], + receipts: &[ReceiptWithBloom], chain_spec: &ChainSpec, timestamp: u64, ) -> B256 { @@ -29,12 +30,10 @@ pub(crate) fn calculate_receipt_root_optimism( }) .collect::>(); - return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| { - r.encode_inner(buf, false) - }) + return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| r.encode_2718(buf)) } - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) + ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_2718(buf)) } /// Calculates the receipt root for a header for the reference type of [Receipt]. 
@@ -63,12 +62,12 @@ pub fn calculate_receipt_root_no_memo_optimism( .collect::>(); return ordered_trie_root_with_encoder(&receipts, |r, buf| { - ReceiptWithBloomRef::from(r).encode_inner(buf, false) + r.with_bloom_ref().encode_2718(buf); }) } ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) + r.with_bloom_ref().encode_2718(buf); }) } @@ -123,7 +122,7 @@ mod tests { deposit_nonce: Some(4012991u64), deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), }, // 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a ReceiptWithBloom { @@ -169,7 +168,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: 
Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), + logs_bloom: Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), }, // 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266 ReceiptWithBloom { @@ -211,7 +210,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), + logs_bloom: 
Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), }, // 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f ReceiptWithBloom { @@ -283,7 +282,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), + logs_bloom: Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), }, // 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351 ReceiptWithBloom { @@ -325,7 +324,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: 
Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), }, ]; let root = calculate_receipt_root_optimism(&receipts, BASE_SEPOLIA.as_ref(), case.1); @@ -339,7 +338,7 @@ mod tests { address: Address::ZERO, data: LogData::new_unchecked(vec![], Default::default()), }]; - let bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); + let logs_bloom = 
bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); let receipt = ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Eip2930, @@ -349,7 +348,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom, + logs_bloom, }; let receipt = vec![receipt]; let root = calculate_receipt_root_optimism(&receipt, BASE_SEPOLIA.as_ref(), 0); diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 3a76ec138547..5290603e7b89 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,4 +1,5 @@ use crate::proof::calculate_receipt_root_optimism; +use alloy_consensus::TxReceipt; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; @@ -57,7 +58,7 @@ fn verify_receipts( calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); // Calculate header logs bloom. 
- let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom()); compare_receipts_root_and_logs_bloom( receipts_root, diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 2844c9397b83..edbc73a9362d 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -40,9 +40,7 @@ pub use block::{ }; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use receipt::{ - gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, -}; +pub use receipt::{gas_spent_by_transactions, Receipt, Receipts}; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, LogData, NodePrimitives, SealedHeader, StorageEntry, @@ -56,6 +54,8 @@ pub use transaction::{ TransactionSigned, TransactionSignedEcRecovered, TxType, }; +pub use alloy_consensus::ReceiptWithBloom; + // Re-exports pub use reth_ethereum_forks::*; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 2a1d5b6982b4..4711da0934c3 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,9 +1,12 @@ //! Helper function for calculating Merkle proofs and hashes. -use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use crate::Receipt; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; +pub use alloy_consensus::proofs::calculate_receipt_root; + /// Calculate a transaction root. /// /// `(rlp(index), encoded(tx))` pairs. @@ -18,23 +21,11 @@ pub use alloy_consensus::proofs::calculate_withdrawals_root; #[doc(inline)] pub use alloy_consensus::proofs::calculate_ommers_root; -/// Calculates the receipt root for a header. 
-pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} - -/// Calculates the receipt root for a header. -pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} - /// Calculates the receipt root for a header for the reference type of [Receipt]. /// /// NOTE: Prefer [`calculate_receipt_root`] if you have log blooms memoized. pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) - }) + ordered_trie_root_with_encoder(receipts, |r, buf| r.with_bloom_ref().encode_2718(buf)) } #[cfg(test)] @@ -67,6 +58,8 @@ mod tests { #[cfg(not(feature = "optimism"))] #[test] fn check_receipt_root_optimism() { + use alloy_consensus::ReceiptWithBloom; + let logs = vec![Log { address: Address::ZERO, data: LogData::new_unchecked(vec![], Default::default()), @@ -79,7 +72,7 @@ mod tests { cumulative_gas_used: 102068, logs, }, - bloom, + logs_bloom: bloom, }; let receipt = vec![receipt]; let root = calculate_receipt_root(&receipt); diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 2e8e269e711e..419c36c2080b 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,15 +1,13 @@ use alloc::{vec, vec::Vec}; -use core::cmp::Ordering; use reth_primitives_traits::InMemorySize; use alloy_consensus::{ - constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, - Eip658Value, TxReceipt, Typed2718, + Eip2718EncodableReceipt, Eip658Value, ReceiptWithBloom, RlpDecodableReceipt, + RlpEncodableReceipt, TxReceipt, Typed2718, }; -use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Bloom, Log, B256}; -use alloy_rlp::{length_of_length, Decodable, Encodable, 
RlpDecodable, RlpEncodable}; -use bytes::{Buf, BufMut}; +use alloy_rlp::{Decodable, Encodable, Header, RlpDecodable, RlpEncodable}; +use bytes::BufMut; use derive_more::{DerefMut, From, IntoIterator}; use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; @@ -61,15 +59,180 @@ impl Receipt { /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container /// type. - pub fn with_bloom(self) -> ReceiptWithBloom { + pub fn with_bloom(self) -> ReceiptWithBloom { self.into() } - /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloomRef`] + /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] /// container type. - pub fn with_bloom_ref(&self) -> ReceiptWithBloomRef<'_> { + pub fn with_bloom_ref(&self) -> ReceiptWithBloom<&Self> { self.into() } + + /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. + pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { + let len = self.success.length() + + self.cumulative_gas_used.length() + + bloom.length() + + self.logs.length(); + + #[cfg(feature = "optimism")] + if self.tx_type == TxType::Deposit { + let mut len = len; + + if let Some(deposit_nonce) = self.deposit_nonce { + len += deposit_nonce.length(); + } + if let Some(deposit_receipt_version) = self.deposit_receipt_version { + len += deposit_receipt_version.length(); + } + + return len + } + + len + } + + /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. 
+ pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) { + self.success.encode(out); + self.cumulative_gas_used.encode(out); + bloom.encode(out); + self.logs.encode(out); + + #[cfg(feature = "optimism")] + if self.tx_type == TxType::Deposit { + if let Some(nonce) = self.deposit_nonce { + nonce.encode(out); + } + if let Some(version) = self.deposit_receipt_version { + version.encode(out); + } + } + } + + /// Returns RLP header for inner encoding. + pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) } + } + + fn decode_receipt_with_bloom( + buf: &mut &[u8], + tx_type: TxType, + ) -> alloy_rlp::Result> { + let b = &mut &**buf; + let rlp_head = alloy_rlp::Header::decode(b)?; + if !rlp_head.list { + return Err(alloy_rlp::Error::UnexpectedString) + } + let started_len = b.len(); + + let success = Decodable::decode(b)?; + let cumulative_gas_used = Decodable::decode(b)?; + let bloom = Decodable::decode(b)?; + let logs = Decodable::decode(b)?; + + let receipt = match tx_type { + #[cfg(feature = "optimism")] + TxType::Deposit => { + let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; + let deposit_nonce = remaining(b).then(|| Decodable::decode(b)).transpose()?; + let deposit_receipt_version = + remaining(b).then(|| Decodable::decode(b)).transpose()?; + + Self { + tx_type, + success, + cumulative_gas_used, + logs, + deposit_nonce, + deposit_receipt_version, + } + } + _ => Self { + tx_type, + success, + cumulative_gas_used, + logs, + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + }; + + let this = ReceiptWithBloom { receipt, logs_bloom: bloom }; + let consumed = started_len - b.len(); + if consumed != rlp_head.payload_length { + return Err(alloy_rlp::Error::ListLengthMismatch { + expected: rlp_head.payload_length, + got: consumed, + }) + } + *buf = *b; + Ok(this) + } +} + 
+impl Eip2718EncodableReceipt for Receipt { + fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + self.rlp_header_inner(bloom).length_with_payload() + + !matches!(self.tx_type, TxType::Legacy) as usize // account for type prefix + } + + fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !matches!(self.tx_type, TxType::Legacy) { + out.put_u8(self.tx_type as u8); + } + self.rlp_header_inner(bloom).encode(out); + self.rlp_encode_fields(bloom, out); + } +} + +impl RlpEncodableReceipt for Receipt { + fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + let mut len = self.eip2718_encoded_length_with_bloom(bloom); + if !matches!(self.tx_type, TxType::Legacy) { + len += Header { + list: false, + payload_length: self.eip2718_encoded_length_with_bloom(bloom), + } + .length(); + } + + len + } + + fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !matches!(self.tx_type, TxType::Legacy) { + Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) } + .encode(out); + } + self.eip2718_encode_with_bloom(bloom, out); + } +} + +impl RlpDecodableReceipt for Receipt { + fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { + let header_buf = &mut &**buf; + let header = Header::decode(header_buf)?; + + if header.list { + return Self::decode_receipt_with_bloom(buf, TxType::Legacy); + } + + *buf = *header_buf; + + let remaining = buf.len(); + let tx_type = TxType::decode(buf)?; + let this = Self::decode_receipt_with_bloom(buf, tx_type)?; + + if buf.len() + header.payload_length != remaining { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(this) + } } impl TxReceipt for Receipt { @@ -183,51 +346,12 @@ impl FromIterator>> for Receipts { } } -impl From for ReceiptWithBloom { - fn from(receipt: Receipt) -> Self { - let bloom = receipt.bloom_slow(); - Self { receipt, bloom } - } -} - impl Default for Receipts { fn default() -> Self { Self { 
receipt_vec: Vec::new() } } } -/// [`Receipt`] with calculated bloom filter. -#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -pub struct ReceiptWithBloom { - /// Bloom filter build from logs. - pub bloom: Bloom, - /// Main receipt body - pub receipt: Receipt, -} - -impl ReceiptWithBloom { - /// Create new [`ReceiptWithBloom`] - pub const fn new(receipt: Receipt, bloom: Bloom) -> Self { - Self { receipt, bloom } - } - - /// Consume the structure, returning only the receipt - pub fn into_receipt(self) -> Receipt { - self.receipt - } - - /// Consume the structure, returning the receipt and the bloom filter - pub fn into_components(self) -> (Receipt, Bloom) { - (self.receipt, self.bloom) - } - - #[inline] - const fn as_encoder(&self) -> ReceiptWithBloomEncoder<'_> { - ReceiptWithBloomEncoder { receipt: &self.receipt, bloom: &self.bloom } - } -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Receipt { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -260,317 +384,10 @@ impl<'a> arbitrary::Arbitrary<'a> for Receipt { } } -impl Encodable2718 for ReceiptWithBloom { - fn type_flag(&self) -> Option { - match self.receipt.tx_type { - TxType::Legacy => None, - tx_type => Some(tx_type as u8), - } - } - - fn encode_2718_len(&self) -> usize { - let encoder = self.as_encoder(); - match self.receipt.tx_type { - TxType::Legacy => encoder.receipt_length(), - _ => 1 + encoder.receipt_length(), // 1 byte for the type prefix - } - } - - /// Encodes the receipt into its "raw" format. - /// This format is also referred to as "binary" encoding. - /// - /// For legacy receipts, it encodes the RLP of the receipt into the buffer: - /// `rlp([status, cumulativeGasUsed, logsBloom, logs])` as per EIP-2718. 
- /// For EIP-2718 typed transactions, it encodes the type of the transaction followed by the rlp - /// of the receipt: - /// - EIP-1559, 2930 and 4844 transactions: `tx-type || rlp([status, cumulativeGasUsed, - /// logsBloom, logs])` - fn encode_2718(&self, out: &mut dyn BufMut) { - self.encode_inner(out, false) - } - - fn encoded_2718(&self) -> Vec { - let mut out = vec![]; - self.encode_2718(&mut out); - out - } -} - -impl ReceiptWithBloom { - /// Encode receipt with or without the header data. - pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - self.as_encoder().encode_inner(out, with_header) - } - - /// Decodes the receipt payload - fn decode_receipt(buf: &mut &[u8], tx_type: TxType) -> alloy_rlp::Result { - let b = &mut &**buf; - let rlp_head = alloy_rlp::Header::decode(b)?; - if !rlp_head.list { - return Err(alloy_rlp::Error::UnexpectedString) - } - let started_len = b.len(); - - let success = alloy_rlp::Decodable::decode(b)?; - let cumulative_gas_used = alloy_rlp::Decodable::decode(b)?; - let bloom = Decodable::decode(b)?; - let logs = alloy_rlp::Decodable::decode(b)?; - - let receipt = match tx_type { - #[cfg(feature = "optimism")] - TxType::Deposit => { - let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; - let deposit_nonce = - remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; - let deposit_receipt_version = - remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; - - Receipt { - tx_type, - success, - cumulative_gas_used, - logs, - deposit_nonce, - deposit_receipt_version, - } - } - _ => Receipt { - tx_type, - success, - cumulative_gas_used, - logs, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - }; - - let this = Self { receipt, bloom }; - let consumed = started_len - b.len(); - if consumed != rlp_head.payload_length { - return Err(alloy_rlp::Error::ListLengthMismatch { - expected: 
rlp_head.payload_length, - got: consumed, - }) - } - *buf = *b; - Ok(this) - } -} - -impl Encodable for ReceiptWithBloom { - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out, true) - } - fn length(&self) -> usize { - self.as_encoder().length() - } -} - -impl Decodable for ReceiptWithBloom { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - // a receipt is either encoded as a string (non legacy) or a list (legacy). - // We should not consume the buffer if we are decoding a legacy receipt, so let's - // check if the first byte is between 0x80 and 0xbf. - let rlp_type = *buf - .first() - .ok_or(alloy_rlp::Error::Custom("cannot decode a receipt from empty bytes"))?; - - match rlp_type.cmp(&alloy_rlp::EMPTY_LIST_CODE) { - Ordering::Less => { - // strip out the string header - let _header = alloy_rlp::Header::decode(buf)?; - let receipt_type = *buf.first().ok_or(alloy_rlp::Error::Custom( - "typed receipt cannot be decoded from an empty slice", - ))?; - match receipt_type { - EIP2930_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip2930) - } - EIP1559_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip1559) - } - EIP4844_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip4844) - } - EIP7702_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip7702) - } - #[cfg(feature = "optimism")] - op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Deposit) - } - _ => Err(alloy_rlp::Error::Custom("invalid receipt type")), - } - } - Ordering::Equal => { - Err(alloy_rlp::Error::Custom("an empty list is not a valid receipt encoding")) - } - Ordering::Greater => Self::decode_receipt(buf, TxType::Legacy), - } - } -} - -/// [`Receipt`] reference type with calculated bloom filter. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ReceiptWithBloomRef<'a> { - /// Bloom filter build from logs. 
- pub bloom: Bloom, - /// Main receipt body - pub receipt: &'a Receipt, -} - -impl<'a> ReceiptWithBloomRef<'a> { - /// Create new [`ReceiptWithBloomRef`] - pub const fn new(receipt: &'a Receipt, bloom: Bloom) -> Self { - Self { receipt, bloom } - } - - /// Encode receipt with or without the header data. - pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - self.as_encoder().encode_inner(out, with_header) - } - - #[inline] - const fn as_encoder(&self) -> ReceiptWithBloomEncoder<'_> { - ReceiptWithBloomEncoder { receipt: self.receipt, bloom: &self.bloom } - } -} - -impl Encodable for ReceiptWithBloomRef<'_> { - fn encode(&self, out: &mut dyn BufMut) { - self.as_encoder().encode_inner(out, true) - } - fn length(&self) -> usize { - self.as_encoder().length() - } -} - -impl<'a> From<&'a Receipt> for ReceiptWithBloomRef<'a> { - fn from(receipt: &'a Receipt) -> Self { - let bloom = receipt.bloom_slow(); - ReceiptWithBloomRef { receipt, bloom } - } -} - -struct ReceiptWithBloomEncoder<'a> { - bloom: &'a Bloom, - receipt: &'a Receipt, -} - -impl ReceiptWithBloomEncoder<'_> { - /// Returns the rlp header for the receipt payload. - fn receipt_rlp_header(&self) -> alloy_rlp::Header { - let mut rlp_head = alloy_rlp::Header { list: true, payload_length: 0 }; - - rlp_head.payload_length += self.receipt.success.length(); - rlp_head.payload_length += self.receipt.cumulative_gas_used.length(); - rlp_head.payload_length += self.bloom.length(); - rlp_head.payload_length += self.receipt.logs.length(); - - #[cfg(feature = "optimism")] - if self.receipt.tx_type == TxType::Deposit { - if let Some(deposit_nonce) = self.receipt.deposit_nonce { - rlp_head.payload_length += deposit_nonce.length(); - } - if let Some(deposit_receipt_version) = self.receipt.deposit_receipt_version { - rlp_head.payload_length += deposit_receipt_version.length(); - } - } - - rlp_head - } - - /// Encodes the receipt data. 
- fn encode_fields(&self, out: &mut dyn BufMut) { - self.receipt_rlp_header().encode(out); - self.receipt.success.encode(out); - self.receipt.cumulative_gas_used.encode(out); - self.bloom.encode(out); - self.receipt.logs.encode(out); - #[cfg(feature = "optimism")] - if self.receipt.tx_type == TxType::Deposit { - if let Some(deposit_nonce) = self.receipt.deposit_nonce { - deposit_nonce.encode(out) - } - if let Some(deposit_receipt_version) = self.receipt.deposit_receipt_version { - deposit_receipt_version.encode(out) - } - } - } - - /// Encode receipt with or without the header data. - fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - if matches!(self.receipt.tx_type, TxType::Legacy) { - self.encode_fields(out); - return - } - - let mut payload = Vec::new(); - self.encode_fields(&mut payload); - - if with_header { - let payload_length = payload.len() + 1; - let header = alloy_rlp::Header { list: false, payload_length }; - header.encode(out); - } - - match self.receipt.tx_type { - TxType::Legacy => unreachable!("legacy already handled"), - - TxType::Eip2930 => { - out.put_u8(EIP2930_TX_TYPE_ID); - } - TxType::Eip1559 => { - out.put_u8(EIP1559_TX_TYPE_ID); - } - TxType::Eip4844 => { - out.put_u8(EIP4844_TX_TYPE_ID); - } - TxType::Eip7702 => { - out.put_u8(EIP7702_TX_TYPE_ID); - } - #[cfg(feature = "optimism")] - TxType::Deposit => { - out.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); - } - } - out.put_slice(payload.as_ref()); - } - - /// Returns the length of the receipt data. 
- fn receipt_length(&self) -> usize { - let rlp_head = self.receipt_rlp_header(); - length_of_length(rlp_head.payload_length) + rlp_head.payload_length - } -} - -impl Encodable for ReceiptWithBloomEncoder<'_> { - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out, true) - } - fn length(&self) -> usize { - let mut payload_len = self.receipt_length(); - // account for eip-2718 type prefix and set the list - if !matches!(self.receipt.tx_type, TxType::Legacy) { - payload_len += 1; - // we include a string header for typed receipts, so include the length here - payload_len += length_of_length(payload_len); - } - - payload_len - } -} - #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; use reth_codecs::Compact; @@ -610,7 +427,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; receipt.encode(&mut data); @@ -644,7 +461,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); @@ -654,7 +471,7 @@ mod tests { #[cfg(feature = "optimism")] #[test] fn decode_deposit_receipt_regolith_roundtrip() { - let data = hex!("7ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); + let data = 
hex!("b901107ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { @@ -666,21 +483,21 @@ mod tests { deposit_nonce: Some(4012991), deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); let mut buf = Vec::with_capacity(data.len()); - receipt.encode_inner(&mut buf, false); + receipt.encode(&mut buf); assert_eq!(buf, &data[..]); } #[cfg(feature = "optimism")] #[test] fn decode_deposit_receipt_canyon_roundtrip() { - let data = hex!("7ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); + let data = 
hex!("b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { @@ -692,14 +509,14 @@ mod tests { deposit_nonce: Some(4012991), deposit_receipt_version: Some(1), }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); let mut buf = Vec::with_capacity(data.len()); - expected.encode_inner(&mut buf, false); + expected.encode(&mut buf); assert_eq!(buf, &data[..]); } @@ -746,7 +563,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: Bloom::default(), + logs_bloom: Bloom::default(), }; let encoded = receipt.encoded_2718(); @@ -768,7 +585,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: Bloom::default(), + logs_bloom: Bloom::default(), }; let legacy_encoded = legacy_receipt.encoded_2718(); diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a10b4afff9d7..e5ccb47ba5c3 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -217,7 +217,7 @@ pub fn build_block>( logs: call.logs.iter().map(|log| &log.inner).cloned().collect(), ..Default::default() } - .into(), + .with_bloom(), ); calls.push(call); diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index edaca5c1cfca..ec51df8982c3 100644 --- 
a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -348,7 +348,6 @@ impl SparseStateTrie { }) } } - impl SparseStateTrie where F: BlindedProviderFactory, diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 011c95e6a927..cfce88fa0201 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -23,6 +23,7 @@ reth-trie-common.workspace = true revm.workspace = true # alloy +alloy-eips.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true @@ -63,6 +64,7 @@ serde = [ "alloy-primitives/serde", "alloy-consensus/serde", "alloy-trie/serde", + "alloy-eips/serde", "revm/serde", "reth-trie-common/serde" ] diff --git a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index 893e6e9e9994..be6e49545799 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; -use reth_primitives::ReceiptWithBloom; +use reth_primitives::{Receipt, ReceiptWithBloom}; use reth_trie::triehash::KeccakHasher; /// Benchmarks different implementations of the root calculation. 
@@ -27,8 +27,8 @@ pub fn trie_root_benchmark(c: &mut Criterion) { } } -fn generate_test_data(size: usize) -> Vec { - prop::collection::vec(arb::(), size) +fn generate_test_data(size: usize) -> Vec> { + prop::collection::vec(arb::>(), size) .new_tree(&mut TestRunner::new(ProptestConfig::default())) .unwrap() .current() @@ -43,19 +43,19 @@ criterion_main!(benches); mod implementations { use super::*; + use alloy_eips::eip2718::Encodable2718; use alloy_rlp::Encodable; use alloy_trie::root::adjust_index_for_rlp; + use reth_primitives::Receipt; use reth_trie_common::{HashBuilder, Nibbles}; - pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { - triehash::ordered_trie_root::(receipts.iter().map(|receipt| { - let mut receipt_rlp = Vec::new(); - receipt.encode_inner(&mut receipt_rlp, false); - receipt_rlp - })) + pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { + triehash::ordered_trie_root::( + receipts.iter().map(|receipt_with_bloom| receipt_with_bloom.encoded_2718()), + ) } - pub fn hash_builder_root(receipts: &[ReceiptWithBloom]) -> B256 { + pub fn hash_builder_root(receipts: &[ReceiptWithBloom]) -> B256 { let mut index_buffer = Vec::new(); let mut value_buffer = Vec::new(); @@ -68,7 +68,7 @@ mod implementations { index.encode(&mut index_buffer); value_buffer.clear(); - receipts[index].encode_inner(&mut value_buffer, false); + receipts[index].encode_2718(&mut value_buffer); hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); } From 4f28d6c7a4ca95b20eedbd4ba374c7af5cd85232 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 21:07:21 +0100 Subject: [PATCH 02/70] chore: disable url default features (#13191) --- Cargo.toml | 2 +- crates/net/peers/Cargo.toml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 08fa42e10462..650be8337b5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -527,7 +527,7 @@ syn = "2.0" thiserror = { version = "2.0.0", 
default-features = false } tracing = "0.1.0" tracing-appender = "0.2" -url = "2.3" +url = { version = "2.3", default-features = false } zstd = "0.13" byteorder = "1" diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 8ca5faec93d5..4cfc0aee3d6f 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -41,7 +41,8 @@ std = [ "alloy-rlp/std", "secp256k1?/std", "serde_with/std", - "thiserror/std" + "thiserror/std", + "url/std" ] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] net = ["dep:tokio", "tokio?/net"] From 53f72976186ff43a781abd810d5ba702bf3e5f4c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 20:57:01 +0100 Subject: [PATCH 03/70] chore: rm validate delegate (#13190) --- crates/primitives/src/transaction/sidecar.rs | 11 ----------- crates/rpc/rpc/src/eth/bundle.rs | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 2cf04bc8e741..e244a53df77e 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -39,17 +39,6 @@ impl BlobTransaction { } } - /// Verifies that the transaction's blob data, commitments, and proofs are all valid. - /// - /// See also [`alloy_consensus::TxEip4844::validate_blob`] - #[cfg(feature = "c-kzg")] - pub fn validate( - &self, - proof_settings: &c_kzg::KzgSettings, - ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { - self.tx().validate_blob(proof_settings) - } - /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. 
pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index ba142651fd9d..478d1de1c51f 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -198,7 +198,7 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(EnvKzgSettings::Default.get()).map_err(|e| { + tx.tx().validate_blob(EnvKzgSettings::Default.get()).map_err(|e| { Eth::Error::from_eth_err(EthApiError::InvalidParams(e.to_string())) })?; } From e615010cc66cd1ad9cee4917515c58a5748de508 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 6 Dec 2024 15:33:07 -0500 Subject: [PATCH 04/70] fix: don't use reserved word None in bug template (#13192) --- .github/ISSUE_TEMPLATE/bug.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index b3e50defe160..b01d4518f75d 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -67,7 +67,7 @@ body: description: Were you running it in a container? 
multiple: true options: - - None + - Not running in a container - Docker - Kubernetes - LXC/LXD From c608679963e9abeebad050ff9715e23e99648ad8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 7 Dec 2024 03:38:20 +0700 Subject: [PATCH 05/70] perf(`AllTransactions`-iter): do not clone all transactions by default (#13187) --- crates/transaction-pool/src/pool/mod.rs | 17 ++++++++++++++--- crates/transaction-pool/src/pool/txpool.rs | 6 +++--- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index c13dca17de01..d93b4a14d80c 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -295,7 +295,7 @@ where /// Returns _all_ transactions in the pool. pub fn pooled_transactions(&self) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned().collect() } /// Returns only the first `max` transactions in the pool. @@ -303,7 +303,13 @@ where &self, max: usize, ) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).take(max).collect() + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.propagate) + .take(max) + .cloned() + .collect() } /// Converts the internally tracked transaction to the pooled format. 
@@ -857,7 +863,12 @@ where &self, origin: TransactionOrigin, ) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.origin == origin).collect() + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.origin == origin) + .cloned() + .collect() } /// Returns all pending transactions filted by [`TransactionOrigin`] diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 7dd64da73645..1b330543cffa 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1095,11 +1095,11 @@ impl AllTransactions { self.by_hash.keys().copied() } - /// Returns an iterator over all _unique_ hashes in the pool + /// Returns an iterator over all transactions in the pool pub(crate) fn transactions_iter( &self, - ) -> impl Iterator>> + '_ { - self.by_hash.values().cloned() + ) -> impl Iterator>> + '_ { + self.by_hash.values() } /// Returns if the transaction for the given hash is already included in this pool From a0326e4f86e43fa14eac0060fc5770cccdcc89b8 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 7 Dec 2024 03:35:30 +0700 Subject: [PATCH 06/70] perf: more `FxHashMap`s for `SenderId` key (#13188) --- crates/transaction-pool/src/pool/mod.rs | 10 +++------- crates/transaction-pool/src/pool/pending.rs | 9 +++++---- crates/transaction-pool/src/pool/txpool.rs | 6 +++--- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index d93b4a14d80c..89c4d6d3465a 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -89,12 +89,8 @@ use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; use reth_primitives::RecoveredTx; -use std::{ - collections::{HashMap, HashSet}, - fmt, - sync::Arc, - time::Instant, -}; +use rustc_hash::FxHashMap; +use 
std::{collections::HashSet, fmt, sync::Arc, time::Instant}; use tokio::sync::mpsc; use tracing::{debug, trace, warn}; mod events; @@ -216,7 +212,7 @@ where fn changed_senders( &self, accs: impl Iterator, - ) -> HashMap { + ) -> FxHashMap { let mut identifiers = self.identifiers.write(); accs.into_iter() .map(|acc| { diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 89e673aad998..27706bd17543 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -6,9 +6,10 @@ use crate::{ }, Priority, SubPoolLimit, TransactionOrdering, ValidPoolTransaction, }; +use rustc_hash::FxHashMap; use std::{ cmp::Ordering, - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{hash_map::Entry, BTreeMap}, ops::Bound::Unbounded, sync::Arc, }; @@ -36,10 +37,10 @@ pub struct PendingPool { by_id: BTreeMap>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. - highest_nonces: HashMap>, + highest_nonces: FxHashMap>, /// Independent transactions that can be included directly and don't require other /// transactions. - independent_transactions: HashMap>, + independent_transactions: FxHashMap>, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). @@ -523,7 +524,7 @@ impl PendingPool { /// Returns a reference to the independent transactions in the pool #[cfg(test)] - pub(crate) const fn independent(&self) -> &HashMap> { + pub(crate) const fn independent(&self) -> &FxHashMap> { &self.independent_transactions } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1b330543cffa..11212e0aa3e3 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -460,7 +460,7 @@ impl TxPool { /// Updates the transactions for the changed senders. 
pub(crate) fn update_accounts( &mut self, - changed_senders: HashMap, + changed_senders: FxHashMap, ) -> UpdateOutcome { // track changed accounts self.sender_info.extend(changed_senders.clone()); @@ -481,7 +481,7 @@ impl TxPool { &mut self, block_info: BlockInfo, mined_transactions: Vec, - changed_senders: HashMap, + changed_senders: FxHashMap, update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info @@ -1180,7 +1180,7 @@ impl AllTransactions { /// that got transaction included in the block. pub(crate) fn update( &mut self, - changed_accounts: HashMap, + changed_accounts: FxHashMap, ) -> Vec { // pre-allocate a few updates let mut updates = Vec::with_capacity(64); From 2183752f8d87d936ff3b2b982be0f22af63afa98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Fri, 6 Dec 2024 21:48:52 +0100 Subject: [PATCH 07/70] refactor(prune-types/prune): move PruneLimiter to `reth-prune` (#13182) --- crates/prune/prune/src/db_ext.rs | 2 +- crates/prune/prune/src/lib.rs | 2 ++ crates/prune/{types => prune}/src/limiter.rs | 25 ++++++++++++++++++ crates/prune/prune/src/pruner.rs | 4 +-- crates/prune/prune/src/segments/mod.rs | 6 ++--- crates/prune/prune/src/segments/receipts.rs | 10 +++---- .../prune/src/segments/static_file/headers.rs | 14 +++++----- .../src/segments/static_file/transactions.rs | 10 +++---- .../src/segments/user/account_history.rs | 11 ++++---- .../src/segments/user/receipts_by_logs.rs | 10 +++---- .../src/segments/user/sender_recovery.rs | 8 +++--- .../src/segments/user/storage_history.rs | 15 +++++------ .../src/segments/user/transaction_lookup.rs | 10 +++---- crates/prune/types/src/lib.rs | 2 -- crates/prune/types/src/pruner.rs | 26 +------------------ 15 files changed, 73 insertions(+), 82 deletions(-) rename crates/prune/{types => prune}/src/limiter.rs (94%) diff --git a/crates/prune/prune/src/db_ext.rs b/crates/prune/prune/src/db_ext.rs index a14127af20e3..143cb5e27759 
100644 --- a/crates/prune/prune/src/db_ext.rs +++ b/crates/prune/prune/src/db_ext.rs @@ -1,12 +1,12 @@ use std::{fmt::Debug, ops::RangeBounds}; +use crate::PruneLimiter; use reth_db::{ cursor::{DbCursorRO, DbCursorRW, RangeWalker}, table::{Table, TableRow}, transaction::DbTxMut, DatabaseError, }; -use reth_prune_types::PruneLimiter; use tracing::debug; pub(crate) trait DbTxPruneExt: DbTxMut { diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs index e6bcbe5e8121..ef3ee0de2dbe 100644 --- a/crates/prune/prune/src/lib.rs +++ b/crates/prune/prune/src/lib.rs @@ -12,6 +12,7 @@ mod builder; mod db_ext; mod error; +mod limiter; mod metrics; mod pruner; pub mod segments; @@ -19,6 +20,7 @@ pub mod segments; use crate::metrics::Metrics; pub use builder::PrunerBuilder; pub use error::PrunerError; +pub use limiter::PruneLimiter; pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult}; // Re-export prune types diff --git a/crates/prune/types/src/limiter.rs b/crates/prune/prune/src/limiter.rs similarity index 94% rename from crates/prune/types/src/limiter.rs rename to crates/prune/prune/src/limiter.rs index d555db25733b..654eed04f287 100644 --- a/crates/prune/types/src/limiter.rs +++ b/crates/prune/prune/src/limiter.rs @@ -1,3 +1,4 @@ +use reth_prune_types::{PruneInterruptReason, PruneProgress}; use std::{ num::NonZeroUsize, time::{Duration, Instant}, @@ -119,6 +120,30 @@ impl PruneLimiter { pub fn is_limit_reached(&self) -> bool { self.is_deleted_entries_limit_reached() || self.is_time_limit_reached() } + + /// Creates new [`PruneInterruptReason`] based on the limiter's state. + pub fn interrupt_reason(&self) -> PruneInterruptReason { + if self.is_time_limit_reached() { + PruneInterruptReason::Timeout + } else if self.is_deleted_entries_limit_reached() { + PruneInterruptReason::DeletedEntriesLimitReached + } else { + PruneInterruptReason::Unknown + } + } + + /// Creates new [`PruneProgress`]. 
+ /// + /// If `done == true`, returns [`PruneProgress::Finished`], otherwise + /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the + /// limiter's state. + pub fn progress(&self, done: bool) -> PruneProgress { + if done { + PruneProgress::Finished + } else { + PruneProgress::HasMoreData(self.interrupt_reason()) + } + } } #[cfg(test)] diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index 0ad149bb654d..2344578bd087 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -2,14 +2,14 @@ use crate::{ segments::{PruneInput, Segment}, - Metrics, PrunerError, PrunerEvent, + Metrics, PruneLimiter, PrunerError, PrunerEvent, }; use alloy_primitives::BlockNumber; use reth_exex_types::FinishedExExHeight; use reth_provider::{ DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, }; -use reth_prune_types::{PruneLimiter, PruneProgress, PrunedSegmentInfo, PrunerOutput}; +use reth_prune_types::{PruneProgress, PrunedSegmentInfo, PrunerOutput}; use reth_tokio_util::{EventSender, EventStream}; use std::time::{Duration, Instant}; use tokio::sync::watch; diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index e828512fa824..ae18bcb3c6ee 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -3,12 +3,10 @@ mod set; mod static_file; mod user; -use crate::PrunerError; +use crate::{PruneLimiter, PrunerError}; use alloy_primitives::{BlockNumber, TxNumber}; use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; -use reth_prune_types::{ - PruneCheckpoint, PruneLimiter, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, -}; +use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; pub use set::SegmentSet; pub use static_file::{ Headers as StaticFileHeaders, Receipts as StaticFileReceipts, diff --git 
a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index a365738a777d..dbea32c47fe5 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -12,9 +12,7 @@ use reth_provider::{ errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, }; -use reth_prune_types::{ - PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, -}; +use reth_prune_types::{PruneCheckpoint, PruneSegment, SegmentOutput, SegmentOutputCheckpoint}; use tracing::trace; pub(crate) fn prune( @@ -56,7 +54,7 @@ where // so we could finish pruning its receipts on the next run. .checked_sub(if done { 0 } else { 1 }); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -83,7 +81,7 @@ pub(crate) fn save_checkpoint( #[cfg(test)] mod tests { - use crate::segments::{PruneInput, SegmentOutput}; + use crate::segments::{PruneInput, PruneLimiter, SegmentOutput}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ @@ -93,7 +91,7 @@ mod tests { use reth_db::tables; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 5cd6f62643a4..7d100f4e2832 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -3,7 +3,7 @@ use std::num::NonZeroUsize; use crate::{ db_ext::DbTxPruneExt, 
segments::{PruneInput, Segment}, - PrunerError, + PruneLimiter, PrunerError, }; use alloy_primitives::BlockNumber; use itertools::Itertools; @@ -14,8 +14,7 @@ use reth_db::{ }; use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ - PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, - SegmentOutputCheckpoint, + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use reth_static_file_types::StaticFileSegment; use tracing::trace; @@ -92,7 +91,7 @@ impl> Segment Self { - if limiter.is_time_limit_reached() { - Self::Timeout - } else if limiter.is_deleted_entries_limit_reached() { - Self::DeletedEntriesLimitReached - } else { - Self::Unknown - } - } - /// Returns `true` if the reason is timeout. pub const fn is_timeout(&self) -> bool { matches!(self, Self::Timeout) @@ -124,19 +113,6 @@ impl PruneInterruptReason { } impl PruneProgress { - /// Creates new [`PruneProgress`]. - /// - /// If `done == true`, returns [`PruneProgress::Finished`], otherwise - /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the - /// passed limiter. - pub fn new(done: bool, limiter: &PruneLimiter) -> Self { - if done { - Self::Finished - } else { - Self::HasMoreData(PruneInterruptReason::new(limiter)) - } - } - /// Returns `true` if prune run is finished. 
pub const fn is_finished(&self) -> bool { matches!(self, Self::Finished) From e9915702fa226bdec497993ccee9fb4818cbbc18 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 6 Dec 2024 22:08:22 +0100 Subject: [PATCH 08/70] perf: call increment once (#13193) --- crates/transaction-pool/src/pool/txpool.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 11212e0aa3e3..5820b5f894af 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -489,13 +489,16 @@ impl TxPool { self.all_transactions.set_block_info(block_info); // Remove all transaction that were included in the block + let mut removed_txs_count = 0; for tx_hash in &mined_transactions { if self.prune_transaction_by_hash(tx_hash).is_some() { - // Update removed transactions metric - self.metrics.removed_transactions.increment(1); + removed_txs_count += 1; } } + // Update removed transactions metric + self.metrics.removed_transactions.increment(removed_txs_count); + let UpdateOutcome { promoted, discarded } = self.update_accounts(changed_senders); self.update_transaction_type_metrics(); From 552c6237a8267a05840ae8d99de5c164b8e75017 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 6 Dec 2024 16:35:51 -0500 Subject: [PATCH 09/70] feat: make BlockResponse generic over header (#13195) --- crates/net/downloaders/src/bodies/bodies.rs | 19 ++++++++++----- crates/net/downloaders/src/bodies/noop.rs | 2 +- crates/net/downloaders/src/bodies/queue.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 4 ++-- crates/net/p2p/src/bodies/downloader.rs | 2 +- crates/net/p2p/src/bodies/response.rs | 23 +++++++++++-------- crates/primitives-traits/src/header/sealed.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 2 +- 8 files changed, 33 insertions(+), 23 deletions(-) diff --git 
a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 1ee94929913e..bdf2aca9c778 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -61,7 +61,7 @@ pub struct BodiesDownloader { /// Buffered responses buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec>, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } @@ -193,7 +193,7 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec>) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); @@ -210,7 +210,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response(&mut self, response: Vec>) { + fn buffer_bodies_response( + &mut self, + response: Vec>, + ) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + response.capacity() * mem::size_of::>(); @@ -227,7 +230,9 @@ where } /// Returns a response if it's first block number matches the next expected. 
- fn try_next_buffered(&mut self) -> Option>> { + fn try_next_buffered( + &mut self, + ) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -253,7 +258,9 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch(&mut self) -> Option>> { + fn try_split_next_batch( + &mut self, + ) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -436,7 +443,7 @@ where #[derive(Debug)] struct OrderedBodiesResponse { - resp: Vec>, + resp: Vec>, /// The total size of the response in bytes size: usize, } diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index 494a5f2ef2ec..f311a242c20d 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -21,7 +21,7 @@ impl BodyDownloader for NoopBodiesDownloader { } impl Stream for NoopBodiesDownloader { - type Item = Result>, DownloadError>; + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index aa6ec9e4af0f..5f1e8b059cf8 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -80,7 +80,7 @@ impl Stream for BodiesRequestQueue where B: BodiesClient + 'static, { - type Item = DownloadResult>>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 40fa9c309ba4..28cfdb61b7cd 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ 
b/crates/net/downloaders/src/bodies/request.rs @@ -48,7 +48,7 @@ pub(crate) struct BodiesRequestFuture { // Headers to download. The collection is shrunk as responses are buffered. pending_headers: VecDeque, /// Internal buffer for all blocks - buffer: Vec>, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, @@ -217,7 +217,7 @@ impl Future for BodiesRequestFuture where B: BodiesClient + 'static, { - type Output = DownloadResult>>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index 7008c08e522e..06f35fc9bd69 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -5,7 +5,7 @@ use futures::Stream; use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. 
/// diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 11aaab17a300..02534ea09637 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,19 +1,22 @@ use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{BlockHeader, InMemorySize}; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock), /// The empty block response - Empty(SealedHeader), + Empty(SealedHeader), } -impl BlockResponse { +impl BlockResponse +where + H: BlockHeader, +{ /// Return the reference to the response header - pub const fn header(&self) -> &SealedHeader { + pub const fn header(&self) -> &SealedHeader { match self { Self::Full(block) => &block.header, Self::Empty(header) => header, @@ -22,14 +25,14 @@ impl BlockResponse { /// Return the block number pub fn block_number(&self) -> BlockNumber { - self.header().number + self.header().number() } /// Return the reference to the response header pub fn difficulty(&self) -> U256 { match self { - Self::Full(block) => block.difficulty, - Self::Empty(header) => header.difficulty, + Self::Full(block) => block.difficulty(), + Self::Empty(header) => header.difficulty(), } } @@ -42,7 +45,7 @@ impl BlockResponse { } } -impl InMemorySize for BlockResponse { +impl InMemorySize for BlockResponse { #[inline] fn size(&self) -> usize { match self { diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 1a5163e6ba3f..e99b0e1c17ff 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -67,7 +67,7 @@ impl SealedHeader { } } -impl InMemorySize for SealedHeader { +impl InMemorySize for 
SealedHeader { /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] fn size(&self) -> usize { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 83be3f36fcf8..88a1b96e249e 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -56,7 +56,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>>, + buffer: Option>>, } impl BodyStage { From 9167e454b5b3c07ea3147fcf99760ec37b0207ad Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 7 Dec 2024 07:28:50 +0400 Subject: [PATCH 10/70] refactor: simplify and relax some RPC bounds (#13202) --- crates/primitives-traits/src/receipt.rs | 9 +- crates/rpc/rpc-builder/src/eth.rs | 36 ++---- crates/rpc/rpc-builder/src/lib.rs | 88 ++++++-------- crates/rpc/rpc/src/debug.rs | 131 ++++++++++----------- crates/rpc/rpc/src/eth/filter.rs | 148 ++++++++++++------------ crates/rpc/rpc/src/eth/pubsub.rs | 122 ++++++++----------- crates/rpc/rpc/src/trace.rs | 48 +++----- crates/rpc/rpc/src/validation.rs | 47 ++++---- 8 files changed, 279 insertions(+), 350 deletions(-) diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 1c115981e3e8..1b5d2b698c8b 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -3,7 +3,9 @@ use alloc::vec::Vec; use core::fmt; -use alloy_consensus::{TxReceipt, Typed2718}; +use alloy_consensus::{ + Eip2718EncodableReceipt, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718, +}; use alloy_primitives::B256; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; @@ -23,8 +25,9 @@ pub trait Receipt: + Default + fmt::Debug + TxReceipt - + alloy_rlp::Encodable - + alloy_rlp::Decodable + + RlpEncodableReceipt + + RlpDecodableReceipt + + Eip2718EncodableReceipt + Typed2718 + MaybeSerde + InMemorySize diff --git 
a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 453efb0ddb4a..2a6744e7b18e 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,6 +1,6 @@ use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::EthPrimitives; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; @@ -15,38 +15,35 @@ pub type DynEthApiBuilder { +pub struct EthHandlers { /// Main `eth_` request handler pub api: EthApi, /// The async caching layer used by the eth handlers pub cache: EthStateCache, /// Polling based filter handler available on all transports - pub filter: EthFilter, + pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) - pub pubsub: EthPubSub, + pub pubsub: EthPubSub, } -impl EthHandlers +impl EthHandlers where Provider: StateProviderFactory + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, > + EvmEnvProvider + Clone + Unpin + 'static, - Pool: Send + Sync + Clone + 'static, - Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: EthApiTypes + 'static, { /// Returns a new instance with handlers for `eth` namespace. /// /// This will spawn all necessary tasks for the handlers. 
#[allow(clippy::too_many_arguments)] - pub fn bootstrap( + pub fn bootstrap( provider: Provider, pool: Pool, network: Network, @@ -92,22 +89,13 @@ where let api = eth_api_builder(&ctx); - let filter = EthFilter::new( - ctx.provider.clone(), - ctx.pool.clone(), - ctx.cache.clone(), - ctx.config.filter_config(), - Box::new(ctx.executor.clone()), - api.tx_resp_builder().clone(), - ); + let filter = + EthFilter::new(api.clone(), ctx.config.filter_config(), Box::new(ctx.executor.clone())); let pubsub = EthPubSub::with_spawner( - ctx.provider.clone(), - ctx.pool.clone(), + api.clone(), ctx.events.clone(), - ctx.network.clone(), Box::new(ctx.executor.clone()), - api.tx_resp_builder().clone(), ); Self { api, cache: ctx.cache, filter, pubsub } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index df25564486f1..949e377afb10 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -204,11 +204,11 @@ use reth_consensus::FullConsensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::{EthPrimitives, NodePrimitives}; +use reth_primitives::NodePrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, HeaderProvider, ProviderBlock, ProviderHeader, - ProviderReceipt, ReceiptProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, + ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, @@ -273,7 +273,7 @@ pub async fn launch, block_executor: BlockExecutor, - consensus: Arc, + consensus: Arc>, ) -> Result where Provider: FullRpcProvider< @@ -285,7 +285,7 @@ where Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo 
+ Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer< Provider: BlockReader< @@ -298,6 +298,8 @@ where Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, >, >, { @@ -649,15 +651,17 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, >, >, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -687,11 +691,9 @@ where >, >, Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - Header = ::Header, + Block = ::Block, + Receipt = ::Receipt, >, - Pool: TransactionPool::Transaction>, { let Self { provider, @@ -815,7 +817,6 @@ where Provider: BlockReader< Block = ::Block, Receipt = ::Receipt, - Header = ::Header, >, Pool: TransactionPool::Transaction>, { @@ -963,7 +964,7 @@ pub struct RpcRegistryInner< /// Holds the configuration for the RPC modules config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers - eth: EthHandlers, + eth: EthHandlers, /// to put trace calls behind semaphore blocking_pool_guard: BlockingTaskGuard, /// Contains the [Methods] of a module @@ -977,16 +978,15 @@ impl where Provider: StateProviderFactory + BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, > + EvmEnvProvider + Clone + Unpin + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, EthApi: EthApiTypes + 'static, BlockExecutor: BlockExecutorProvider, @@ -1057,7 +1057,7 @@ where } /// Returns a reference to the installed [`EthHandlers`]. 
- pub const fn eth_handlers(&self) -> &EthHandlers { + pub const fn eth_handlers(&self) -> &EthHandlers { &self.eth } @@ -1215,7 +1215,6 @@ where pub fn register_trace(&mut self) -> &mut Self where EthApi: TraceExt, - Provider: BlockReader::Block>, { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); @@ -1276,15 +1275,11 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&self) -> TraceApi + pub fn trace_api(&self) -> TraceApi where EthApi: TraceExt, { - TraceApi::new( - self.provider.clone(), - self.eth_api().clone(), - self.blocking_pool_guard.clone(), - ) + TraceApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone()) } /// Instantiates [`EthBundle`] Api @@ -1305,14 +1300,13 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn debug_api(&self) -> DebugApi + pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, BlockExecutor: BlockExecutorProvider>, { DebugApi::new( - self.provider.clone(), self.eth_api().clone(), self.blocking_pool_guard.clone(), self.block_executor.clone(), @@ -1340,7 +1334,7 @@ where /// Instantiates `ValidationApi` pub fn validation_api(&self) -> ValidationApi where - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { ValidationApi::new( self.provider.clone(), @@ -1355,30 +1349,27 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider< - Block = ::Block, - Receipt = ::Receipt, - Header = ::Header, - > + AccountReader - + ChangeSetReader, - Pool: TransactionPool::Transaction> + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: 
CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, >, >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< Block = reth_primitives::Block, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, >, >, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1468,7 +1459,6 @@ where .into() } RethRpcModule::Debug => DebugApi::new( - self.provider.clone(), eth_api.clone(), self.blocking_pool_guard.clone(), self.block_executor.clone(), @@ -1495,16 +1485,14 @@ where RethRpcModule::Net => { NetApi::new(self.network.clone(), eth_api.clone()).into_rpc().into() } - RethRpcModule::Trace => TraceApi::new( - self.provider.clone(), - eth_api.clone(), - self.blocking_pool_guard.clone(), - ) - .into_rpc() - .into(), + RethRpcModule::Trace => { + TraceApi::new(eth_api.clone(), self.blocking_pool_guard.clone()) + .into_rpc() + .into() + } RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), RethRpcModule::Txpool => TxPoolApi::new( - self.pool.clone(), + self.eth.api.pool().clone(), self.eth.api.tx_resp_builder().clone(), ) .into_rpc() @@ -1524,7 +1512,7 @@ where .into() } RethRpcModule::Flashbots => ValidationApi::new( - self.provider.clone(), + eth_api.provider().clone(), Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 91236ca9de56..5e799dd69caa 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -19,16 +19,16 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, 
ConfigureEvmEnv, }; -use reth_primitives::{BlockExt, NodePrimitives, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, NodePrimitives, ReceiptWithBloom, SealedBlockWithSenders}; use reth_primitives_traits::{Block as _, BlockBody, SignedTransaction}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, - StateProofProvider, StateProviderFactory, TransactionVariant, + BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, + ReceiptProviderIdExt, StateProofProvider, TransactionVariant, }; use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, + helpers::{EthTransactions, TraceExt}, EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; @@ -47,22 +47,20 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. /// /// This type provides the functionality for handling `debug` related requests. -pub struct DebugApi { - inner: Arc>, +pub struct DebugApi { + inner: Arc>, } // === impl DebugApi === -impl DebugApi { +impl DebugApi { /// Create a new instance of the [`DebugApi`] pub fn new( - provider: Provider, eth: Eth, blocking_task_guard: BlockingTaskGuard, block_executor: BlockExecutor, ) -> Self { - let inner = - Arc::new(DebugApiInner { provider, eth_api: eth, blocking_task_guard, block_executor }); + let inner = Arc::new(DebugApiInner { eth_api: eth, blocking_task_guard, block_executor }); Self { inner } } @@ -72,15 +70,17 @@ impl DebugApi { } } +impl DebugApi { + /// Access the underlying provider. 
+ pub fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } +} + // === impl DebugApi === -impl DebugApi +impl DebugApi where - Provider: BlockReaderIdExt - + HeaderProvider - + ChainSpecProvider - + StateProviderFactory - + 'static, Eth: EthApiTypes + TraceExt + 'static, BlockExecutor: BlockExecutorProvider>>, @@ -164,34 +164,30 @@ where let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(block.header()).await?; // Depending on EIP-2 we need to recover the transactions differently - let senders = if self - .inner - .provider - .chain_spec() - .is_homestead_active_at_block(block.header().number()) - { - block - .body() - .transactions() - .iter() - .map(|tx| { - tx.recover_signer() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - } else { - block - .body() - .transactions() - .iter() - .map(|tx| { - tx.recover_signer_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature) - .map_err(Eth::Error::from_eth_err) - }) - .collect::, Eth::Error>>()? - }; + let senders = + if self.provider().chain_spec().is_homestead_active_at_block(block.header().number()) { + block + .body() + .transactions() + .iter() + .map(|tx| { + tx.recover_signer() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + } else { + block + .body() + .transactions() + .iter() + .map(|tx| { + tx.recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature) + .map_err(Eth::Error::from_eth_err) + }) + .collect::, Eth::Error>>()? + }; self.trace_block( Arc::new(block.with_senders_unchecked(senders).seal_slow()), @@ -209,8 +205,7 @@ where opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { let block_hash = self - .inner - .provider + .provider() .block_hash_for_id(block_id) .map_err(Eth::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -813,30 +808,25 @@ where } #[async_trait] -impl DebugApiServer for DebugApi +impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt - + HeaderProvider - + ChainSpecProvider - + StateProviderFactory - + 'static, - Eth: EthApiSpec + EthTransactions + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives::Provider as BlockReader>::Block>, - >, + Eth: EthApiTypes + EthTransactions + TraceExt + 'static, + BlockExecutor: + BlockExecutorProvider>>, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { let header = match block_id { - BlockId::Hash(hash) => self.inner.provider.header(&hash.into()).to_rpc_result()?, + BlockId::Hash(hash) => self.provider().header(&hash.into()).to_rpc_result()?, BlockId::Number(number_or_tag) => { let number = self - .inner - .provider + .provider() .convert_block_number(number_or_tag) .to_rpc_result()? - .ok_or_else(|| internal_rpc_err("Pending block not supported".to_string()))?; - self.inner.provider.header_by_number(number).to_rpc_result()? + .ok_or_else(|| { + internal_rpc_err("Pending block not supported".to_string()) + })?; + self.provider().header_by_number(number).to_rpc_result()? } }; @@ -851,8 +841,7 @@ where /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult { let block = self - .inner - .provider + .provider() .block_by_id(block_id) .to_rpc_result()? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -874,8 +863,7 @@ where /// Returns the bytes of the transaction for the given hash. async fn raw_transactions(&self, block_id: BlockId) -> RpcResult> { let block = self - .inner - .provider + .provider() .block_with_senders_by_id(block_id, TransactionVariant::NoHash) .to_rpc_result()? 
.unwrap_or_default(); @@ -885,13 +873,12 @@ where /// Handler for `debug_getRawReceipts` async fn raw_receipts(&self, block_id: BlockId) -> RpcResult> { Ok(self - .inner - .provider + .provider() .receipts_by_block_id(block_id) .to_rpc_result()? .unwrap_or_default() .into_iter() - .map(|receipt| receipt.with_bloom().encoded_2718().into()) + .map(|receipt| ReceiptWithBloom::from(receipt).encoded_2718().into()) .collect()) } @@ -1201,21 +1188,19 @@ where } } -impl std::fmt::Debug for DebugApi { +impl std::fmt::Debug for DebugApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugApi").finish_non_exhaustive() } } -impl Clone for DebugApi { +impl Clone for DebugApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct DebugApiInner { - /// The provider that can interact with the chain. - provider: Provider, +struct DebugApiInner { /// The implementation of `eth` API eth_api: Eth, // restrict the number of concurrent calls to blocking calls diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 8f50fefcb613..6441db70459a 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -10,9 +10,13 @@ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_primitives::SealedBlockWithSenders; -use reth_provider::{BlockIdReader, BlockReader, ProviderBlock, ProviderError, ProviderReceipt}; +use reth_provider::{ + BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, + ProviderError, ProviderReceipt, +}; use reth_rpc_eth_api::{ - EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, + EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcNodeCoreExt, RpcTransaction, + TransactionCompat, }; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, @@ -40,27 +44,22 @@ use tracing::{error, 
trace}; const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb /// `Eth` filter RPC implementation. -pub struct EthFilter { +pub struct EthFilter { /// All nested fields bundled together - inner: Arc>>, - /// Assembles response data w.r.t. network. - tx_resp_builder: Eth::TransactionCompat, + inner: Arc>, } -impl Clone for EthFilter +impl Clone for EthFilter where Eth: EthApiTypes, - Provider: BlockReader, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } + Self { inner: self.inner.clone() } } } -impl EthFilter +impl EthFilter where - Provider: BlockReader + Send + Sync + 'static, - Pool: Send + Sync + 'static, Eth: EthApiTypes + 'static, { /// Creates a new, shareable instance. @@ -71,22 +70,13 @@ where /// See also [`EthFilterConfig`]. /// /// This also spawns a task that periodically clears stale filters. - pub fn new( - provider: Provider, - pool: Pool, - eth_cache: EthStateCache, - config: EthFilterConfig, - task_spawner: Box, - tx_resp_builder: Eth::TransactionCompat, - ) -> Self { + pub fn new(eth_api: Eth, config: EthFilterConfig, task_spawner: Box) -> Self { let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } = config; let inner = EthFilterInner { - provider, + eth_api, active_filters: ActiveFilters::new(), - pool, id_provider: Arc::new(EthSubscriptionIdProvider::default()), - eth_cache, max_headers_range: MAX_HEADERS_RANGE, task_spawner, stale_filter_ttl, @@ -95,7 +85,7 @@ where max_logs_per_response: max_logs_per_response.unwrap_or(usize::MAX), }; - let eth_filter = Self { inner: Arc::new(inner), tx_resp_builder }; + let eth_filter = Self { inner: Arc::new(inner) }; let this = eth_filter.clone(); eth_filter.inner.task_spawner.spawn_critical( @@ -143,18 +133,26 @@ where } } -impl EthFilter +impl EthFilter where - Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool::Transaction> + 'static, - Eth: FullEthApiTypes, 
+ Eth: FullEthApiTypes + RpcNodeCoreExt, { + /// Access the underlying provider. + fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } + + /// Access the underlying pool. + fn pool(&self) -> &Eth::Pool { + self.inner.eth_api.pool() + } + /// Returns all the filter changes for the given id, if any pub async fn filter_changes( &self, id: FilterId, ) -> Result>, EthFilterError> { - let info = self.inner.provider.chain_info()?; + let info = self.provider().chain_info()?; let best_number = info.best_number; // start_block is the block from which we should start fetching changes, the next block from @@ -185,7 +183,7 @@ where // [start_block..best_block] let end_block = best_number + 1; let block_hashes = - self.inner.provider.canonical_hashes_range(start_block, end_block).map_err( + self.provider().canonical_hashes_range(start_block, end_block).map_err( |_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()), )?; Ok(FilterChanges::Hashes(block_hashes)) @@ -194,11 +192,11 @@ where let (from_block_number, to_block_number) = match filter.block_option { FilterBlockOption::Range { from_block, to_block } => { let from = from_block - .map(|num| self.inner.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let to = to_block - .map(|num| self.inner.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? 
.flatten(); logs_utils::get_filter_block_range(from, to, start_block, info) @@ -242,12 +240,9 @@ where } #[async_trait] -impl EthFilterApiServer> - for EthFilter +impl EthFilterApiServer> for EthFilter where - Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool::Transaction> + 'static, - Eth: FullEthApiTypes + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -272,14 +267,16 @@ where let transaction_kind = match kind.unwrap_or_default() { PendingTransactionFilterKind::Hashes => { - let receiver = self.inner.pool.pending_transactions_listener(); + let receiver = self.pool().pending_transactions_listener(); let pending_txs_receiver = PendingTransactionsReceiver::new(receiver); FilterKind::PendingTransaction(PendingTransactionKind::Hashes(pending_txs_receiver)) } PendingTransactionFilterKind::Full => { - let stream = self.inner.pool.new_pending_pool_transactions_listener(); - let full_txs_receiver = - FullTransactionsReceiver::new(stream, self.tx_resp_builder.clone()); + let stream = self.pool().new_pending_pool_transactions_listener(); + let full_txs_receiver = FullTransactionsReceiver::new( + stream, + self.inner.eth_api.tx_resp_builder().clone(), + ); FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new( full_txs_receiver, ))) @@ -332,10 +329,9 @@ where } } -impl std::fmt::Debug for EthFilter +impl std::fmt::Debug for EthFilter where Eth: EthApiTypes, - Provider: BlockReader, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthFilter").finish_non_exhaustive() @@ -344,21 +340,17 @@ where /// Container type `EthFilter` #[derive(Debug)] -struct EthFilterInner { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, +struct EthFilterInner { + /// Inner `eth` API implementation. 
+ eth_api: Eth, /// All currently installed filters. - active_filters: ActiveFilters, + active_filters: ActiveFilters>, /// Provides ids to identify filters id_provider: Arc, /// Maximum number of blocks that could be scanned per filter max_blocks_per_filter: u64, /// Maximum number of logs that can be returned in a response max_logs_per_response: usize, - /// The async cache frontend for eth related data - eth_cache: EthStateCache, /// maximum number of headers to read at once for range filter max_headers_range: u64, /// The type that can spawn tasks. @@ -367,11 +359,22 @@ struct EthFilterInner { stale_filter_ttl: Duration, } -impl EthFilterInner +impl EthFilterInner where - Provider: BlockReader + BlockIdReader + 'static, - Pool: TransactionPool + 'static, + Eth: RpcNodeCoreExt + EthApiTypes, { + /// Access the underlying provider. + fn provider(&self) -> &Eth::Provider { + self.eth_api.provider() + } + + /// Access the underlying [`EthStateCache`]. + fn eth_cache( + &self, + ) -> &EthStateCache, ProviderReceipt> { + self.eth_api.cache() + } + /// Returns logs matching given filter object. async fn logs_for_filter(&self, filter: Filter) -> Result, EthFilterError> { match filter.block_option { @@ -379,7 +382,7 @@ where // for all matching logs in the block // get the block header with the hash let header = self - .provider + .provider() .header_by_hash_or_number(block_hash.into())? .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; @@ -390,7 +393,7 @@ where let (receipts, maybe_block) = self .receipts_and_maybe_block( &block_num_hash, - self.provider.chain_info()?.best_number, + self.provider().chain_info()?.best_number, ) .await? 
.ok_or(EthApiError::HeaderNotFound(block_hash.into()))?; @@ -399,8 +402,8 @@ where append_matching_block_logs( &mut all_logs, maybe_block - .map(|b| ProviderOrBlock::Block(b)) - .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), &FilteredParams::new(Some(filter)), block_num_hash, &receipts, @@ -412,16 +415,16 @@ where } FilterBlockOption::Range { from_block, to_block } => { // compute the range - let info = self.provider.chain_info()?; + let info = self.provider().chain_info()?; // we start at the most recent block if unset in filter let start_block = info.best_number; let from = from_block - .map(|num| self.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let to = to_block - .map(|num| self.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let (from_block_number, to_block_number) = @@ -433,8 +436,11 @@ where } /// Installs a new filter and returns the new identifier. 
- async fn install_filter(&self, kind: FilterKind) -> RpcResult { - let last_poll_block_number = self.provider.best_block_number().to_rpc_result()?; + async fn install_filter( + &self, + kind: FilterKind>, + ) -> RpcResult { + let last_poll_block_number = self.provider().best_block_number().to_rpc_result()?; let id = FilterId::from(self.id_provider.next_id()); let mut filters = self.active_filters.inner.lock().await; filters.insert( @@ -482,7 +488,7 @@ where for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, self.max_headers_range) { - let headers = self.provider.headers_range(from..=to)?; + let headers = self.provider().headers_range(from..=to)?; for (idx, header) in headers.iter().enumerate() { // only if filter matches @@ -494,7 +500,7 @@ where let block_hash = match headers.get(idx + 1) { Some(parent) => parent.parent_hash(), None => self - .provider + .provider() .block_hash(header.number())? .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, }; @@ -506,8 +512,8 @@ where append_matching_block_logs( &mut all_logs, maybe_block - .map(|block| ProviderOrBlock::Block(block)) - .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), &filter_params, num_hash, &receipts, @@ -540,20 +546,20 @@ where best_number: u64, ) -> Result< Option<( - Arc>>, - Option>>>, + Arc>>, + Option>>>, )>, EthFilterError, > { // The last 4 blocks are most likely cached, so we can just fetch them let cached_range = best_number.saturating_sub(4)..=best_number; let receipts_block = if cached_range.contains(&block_num_hash.number) { - self.eth_cache + self.eth_cache() .get_block_and_receipts(block_num_hash.hash) .await? 
.map(|(b, r)| (r, Some(b))) } else { - self.eth_cache.get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) + self.eth_cache().get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) }; Ok(receipts_block) } diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 58c62133730d..596af187635a 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -17,8 +17,10 @@ use jsonrpsee::{ }; use reth_network_api::NetworkInfo; use reth_primitives::NodePrimitives; -use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; -use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; +use reth_provider::{BlockNumReader, CanonStateSubscriptions}; +use reth_rpc_eth_api::{ + pubsub::EthPubSubApiServer, EthApiTypes, RpcNodeCore, RpcTransaction, TransactionCompat, +}; use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; @@ -35,67 +37,47 @@ use tracing::error; /// /// This handles `eth_subscribe` RPC calls. #[derive(Clone)] -pub struct EthPubSub { +pub struct EthPubSub { /// All nested fields bundled together. - inner: Arc>, + inner: Arc>, /// The type that's used to spawn subscription tasks. subscription_task_spawner: Box, - tx_resp_builder: Eth, } // === impl EthPubSub === -impl EthPubSub { +impl EthPubSub { /// Creates a new, shareable instance. /// /// Subscription tasks are spawned via [`tokio::task::spawn`] - pub fn new( - provider: Provider, - pool: Pool, - chain_events: Events, - network: Network, - tx_resp_builder: Eth, - ) -> Self { - Self::with_spawner( - provider, - pool, - chain_events, - network, - Box::::default(), - tx_resp_builder, - ) + pub fn new(eth_api: Eth, chain_events: Events) -> Self { + Self::with_spawner(eth_api, chain_events, Box::::default()) } /// Creates a new, shareable instance. 
pub fn with_spawner( - provider: Provider, - pool: Pool, + eth_api: Eth, chain_events: Events, - network: Network, subscription_task_spawner: Box, - tx_resp_builder: Eth, ) -> Self { - let inner = EthPubSubInner { provider, pool, chain_events, network }; - Self { inner: Arc::new(inner), subscription_task_spawner, tx_resp_builder } + let inner = EthPubSubInner { eth_api, chain_events }; + Self { inner: Arc::new(inner), subscription_task_spawner } } } #[async_trait::async_trait] -impl EthPubSubApiServer - for EthPubSub +impl EthPubSubApiServer> for EthPubSub where - Provider: BlockReader + EvmEnvProvider + Clone + 'static, - Pool: TransactionPool + 'static, Events: CanonStateSubscriptions< Primitives: NodePrimitives< - SignedTx: Encodable2718, BlockHeader = reth_primitives::Header, Receipt = reth_primitives::Receipt, >, > + Clone + 'static, - Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat> + 'static, + Eth: RpcNodeCore + + EthApiTypes>> + + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -106,9 +88,8 @@ where ) -> jsonrpsee::core::SubscriptionResult { let sink = pending.accept().await?; let pubsub = self.inner.clone(); - let resp_builder = self.tx_resp_builder.clone(); self.subscription_task_spawner.spawn(Box::pin(async move { - let _ = handle_accepted(pubsub, sink, kind, params, resp_builder).await; + let _ = handle_accepted(pubsub, sink, kind, params).await; })); Ok(()) @@ -116,16 +97,13 @@ where } /// The actual handler for an accepted [`EthPubSub::subscribe`] call. 
-async fn handle_accepted( - pubsub: Arc>, +async fn handle_accepted( + pubsub: Arc>, accepted_sink: SubscriptionSink, kind: SubscriptionKind, params: Option, - tx_resp_builder: Eth, ) -> Result<(), ErrorObject<'static>> where - Provider: BlockReader + EvmEnvProvider + Clone + 'static, - Pool: TransactionPool + 'static, Events: CanonStateSubscriptions< Primitives: NodePrimitives< SignedTx: Encodable2718, @@ -134,8 +112,8 @@ where >, > + Clone + 'static, - Network: NetworkInfo + Clone + 'static, - Eth: TransactionCompat>, + Eth: RpcNodeCore + + EthApiTypes>>, { match kind { SubscriptionKind::NewHeads => { @@ -166,7 +144,7 @@ where let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { let tx_value = match from_recovered( tx.transaction.to_consensus(), - &tx_resp_builder, + pubsub.eth_api.tx_resp_builder(), ) { Ok(tx) => { Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) @@ -204,7 +182,7 @@ where let mut canon_state = BroadcastStream::new(pubsub.chain_events.subscribe_to_canonical_state()); // get current sync status - let mut initial_sync_status = pubsub.network.is_syncing(); + let mut initial_sync_status = pubsub.eth_api.network().is_syncing(); let current_sub_res = pubsub.sync_status(initial_sync_status); // send the current status immediately @@ -215,7 +193,7 @@ where } while canon_state.next().await.is_some() { - let current_syncing = pubsub.network.is_syncing(); + let current_syncing = pubsub.eth_api.network().is_syncing(); // Only send a new response if the sync status has changed if current_syncing != initial_sync_status { // Update the sync status on each new block @@ -285,9 +263,7 @@ where } } -impl std::fmt::Debug - for EthPubSub -{ +impl std::fmt::Debug for EthPubSub { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthPubSub").finish_non_exhaustive() } @@ -295,28 +271,28 @@ impl std::fmt::Debug /// Container type `EthPubSub` #[derive(Clone)] -struct EthPubSubInner { - /// The transaction 
pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, +struct EthPubSubInner { + /// The `eth` API. + eth_api: EthApi, /// A type that allows to create new event subscriptions. chain_events: Events, - /// The network. - network: Network, } // == impl EthPubSubInner === -impl EthPubSubInner +impl EthPubSubInner where - Provider: BlockReader + 'static, + Eth: RpcNodeCore, { /// Returns the current sync status for the `syncing` subscription fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { if is_syncing { - let current_block = - self.provider.chain_info().map(|info| info.best_number).unwrap_or_default(); + let current_block = self + .eth_api + .provider() + .chain_info() + .map(|info| info.best_number) + .unwrap_or_default(); EthSubscriptionResult::SyncState(PubSubSyncStatus::Detailed(SyncStatusMetadata { syncing: true, starting_block: 0, @@ -329,35 +305,31 @@ where } } -impl EthPubSubInner +impl EthPubSubInner where - Pool: TransactionPool + 'static, + Eth: RpcNodeCore, { /// Returns a stream that yields all transaction hashes emitted by the txpool. fn pending_transaction_hashes_stream(&self) -> impl Stream { - ReceiverStream::new(self.pool.pending_transactions_listener()) + ReceiverStream::new(self.eth_api.pool().pending_transactions_listener()) } /// Returns a stream that yields all transactions emitted by the txpool. 
fn full_pending_transaction_stream( &self, - ) -> impl Stream::Transaction>> { - self.pool.new_pending_pool_transactions_listener() + ) -> impl Stream::Transaction>> { + self.eth_api.pool().new_pending_pool_transactions_listener() } } -impl EthPubSubInner +impl EthPubSubInner where - Provider: BlockReader + EvmEnvProvider + 'static, Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - SignedTx: Encodable2718, - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - > + 'static, - Network: NetworkInfo + 'static, - Pool: 'static, + Primitives: NodePrimitives< + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + >, { /// Returns a stream that yields all new RPC blocks. fn new_headers_stream(&self) -> impl Stream { diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 5f1bbb7439de..b164e3c19eb4 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -20,10 +20,10 @@ use reth_consensus_common::calc::{ }; use reth_evm::ConfigureEvmEnv; use reth_primitives_traits::{BlockBody, BlockHeader}; -use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_provider::{BlockNumReader, BlockReader, ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; @@ -41,21 +41,16 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. /// /// This type provides the functionality for handling `trace` related requests. 
-pub struct TraceApi { - inner: Arc>, +pub struct TraceApi { + inner: Arc>, } // === impl TraceApi === -impl TraceApi { - /// The provider that can interact with the chain. - pub fn provider(&self) -> &Provider { - &self.inner.provider - } - +impl TraceApi { /// Create a new instance of the [`TraceApi`] - pub fn new(provider: Provider, eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { - let inner = Arc::new(TraceApiInner { provider, eth_api, blocking_task_guard }); + pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { + let inner = Arc::new(TraceApiInner { eth_api, blocking_task_guard }); Self { inner } } @@ -72,15 +67,17 @@ impl TraceApi { } } +impl TraceApi { + /// Access the underlying provider. + pub fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } +} + // === impl TraceApi === -impl TraceApi +impl TraceApi where - Provider: BlockReader::Block> - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + 'static, Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. @@ -576,13 +573,8 @@ where } #[async_trait] -impl TraceApiServer for TraceApi +impl TraceApiServer for TraceApi where - Provider: BlockReader::Block> - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + 'static, Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. @@ -704,20 +696,18 @@ where } } -impl std::fmt::Debug for TraceApi { +impl std::fmt::Debug for TraceApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceApi").finish_non_exhaustive() } } -impl Clone for TraceApi { +impl Clone for TraceApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct TraceApiInner { - /// The provider that can interact with the chain. 
- provider: Provider, +struct TraceApiInner { /// Access to commonly used code of the `eth` namespace eth_api: Eth, // restrict the number of concurrent calls to `trace_*` diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index b72a5d35769f..b13e99eb21c3 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -1,4 +1,6 @@ -use alloy_consensus::{BlobTransactionValidationError, EnvKzgSettings, Transaction, TxReceipt}; +use alloy_consensus::{ + BlobTransactionValidationError, BlockHeader, EnvKzgSettings, Transaction, TxReceipt, +}; use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, @@ -16,10 +18,10 @@ use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::{Block as _, BlockBody}; use reth_provider::{ - AccountReader, BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, HeaderProvider, - StateProviderFactory, WithdrawalsProvider, + BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, StateProviderFactory, }; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; @@ -32,7 +34,7 @@ use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait #[derive(Clone, Debug, derive_more::Deref)] -pub struct ValidationApi { +pub struct ValidationApi { #[deref] inner: Arc>, } @@ -40,11 +42,12 @@ pub struct ValidationApi { impl ValidationApi where Provider: 
ChainSpecProvider, + E: BlockExecutorProvider, { /// Create a new instance of the [`ValidationApi`] pub fn new( provider: Provider, - consensus: Arc, + consensus: Arc>, executor_provider: E, config: ValidationApiConfig, task_spawner: Box, @@ -91,21 +94,18 @@ where Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider + 'static, E: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, + BlockHeader = Provider::Header, + BlockBody = reth_primitives::BlockBody, >, >, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders<::Block>, message: BidTrace, registered_gas_limit: u64, ) -> Result<(), ValidationApiError> { @@ -187,9 +187,9 @@ where let state_root = state_provider.state_root(state_provider.hashed_post_state(&output.state))?; - if state_root != block.state_root { + if state_root != block.header().state_root() { return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), + GotExpected { got: state_root, expected: block.header().state_root() }.into(), ) .into()) } @@ -262,7 +262,7 @@ where /// to checking the latest block transaction. 
fn ensure_payment( &self, - block: &Block, + block: &::Block, output: &BlockExecutionOutput<::Receipt>, message: &BidTrace, ) -> Result<(), ValidationApiError> { @@ -279,7 +279,7 @@ where (U256::ZERO, U256::ZERO) }; - if let Some(withdrawals) = &block.body.withdrawals { + if let Some(withdrawals) = block.body().withdrawals() { for withdrawal in withdrawals { if withdrawal.address == message.proposer_fee_recipient { balance_before += withdrawal.amount_wei(); @@ -294,7 +294,7 @@ where let (receipt, tx) = output .receipts .last() - .zip(block.body.transactions.last()) + .zip(block.body().transactions().last()) .ok_or(ValidationApiError::ProposerPayment)?; if !receipt.status() { @@ -313,7 +313,7 @@ where return Err(ValidationApiError::ProposerPayment) } - if let Some(block_base_fee) = block.base_fee_per_gas { + if let Some(block_base_fee) = block.header().base_fee_per_gas() { if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 { return Err(ValidationApiError::ProposerPayment) } @@ -412,15 +412,12 @@ where Provider: BlockReaderIdExt
+ ChainSpecProvider + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider + Clone + 'static, E: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, + BlockHeader = Provider::Header, + BlockBody = reth_primitives::BlockBody, >, >, { @@ -476,11 +473,11 @@ where } #[derive(Debug)] -pub struct ValidationApiInner { +pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. - consensus: Arc, + consensus: Arc>, /// Execution payload validator. payload_validator: ExecutionPayloadValidator, /// Block executor factory. From 7e9d2c1a34fc85aa4cdaab39015c4f36f8cbcd2f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:00:51 -0500 Subject: [PATCH 11/70] chore: remove unused trie-common alloy-serde dep (#13201) --- crates/trie/common/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 73fce5f8e7bd..4f6a927d4344 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -57,7 +57,7 @@ serde_with.workspace = true [features] eip1186 = [ - "dep:alloy-rpc-types-eth", + "alloy-rpc-types-eth/serde", "dep:alloy-serde", ] serde = [ @@ -88,7 +88,7 @@ test-utils = [ arbitrary = [ "alloy-trie/arbitrary", "dep:arbitrary", - "alloy-serde/arbitrary", + "alloy-serde?/arbitrary", "reth-primitives-traits/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", From 52b8ff4b0c4e5bb560376571bc35e0268e9df587 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 7 Dec 2024 00:29:49 -0500 Subject: [PATCH 12/70] chore: fix `cargo check -p reth-stages --tests` (#13200) --- crates/stages/stages/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/stages/stages/Cargo.toml 
b/crates/stages/stages/Cargo.toml index f97214f46433..e7114eeb16ac 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -70,6 +70,7 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true +reth-stages-api = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } From 4d2c5767ec44004af023500dafb7ee9a1193cd72 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Sat, 7 Dec 2024 12:30:36 +0700 Subject: [PATCH 13/70] perf(txpool): remove more clones (#13189) --- crates/transaction-pool/src/pool/txpool.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 5820b5f894af..dd6da1d0fef4 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -462,10 +462,12 @@ impl TxPool { &mut self, changed_senders: FxHashMap, ) -> UpdateOutcome { - // track changed accounts - self.sender_info.extend(changed_senders.clone()); // Apply the state changes to the total set of transactions which triggers sub-pool updates. - let updates = self.all_transactions.update(changed_senders); + let updates = self.all_transactions.update(&changed_senders); + + // track changed accounts + self.sender_info.extend(changed_senders); + // Process the sub-pool updates let update = self.process_updates(updates); // update the metrics after the update @@ -1183,7 +1185,7 @@ impl AllTransactions { /// that got transaction included in the block. 
pub(crate) fn update( &mut self, - changed_accounts: FxHashMap, + changed_accounts: &FxHashMap, ) -> Vec { // pre-allocate a few updates let mut updates = Vec::with_capacity(64); @@ -1240,7 +1242,7 @@ impl AllTransactions { } } - changed_balance = Some(info.balance); + changed_balance = Some(&info.balance); } // If there's a nonce gap, we can shortcircuit, because there's nothing to update yet. @@ -1291,7 +1293,7 @@ impl AllTransactions { // If the account changed in the block, check the balance. if let Some(changed_balance) = changed_balance { - if cumulative_cost > changed_balance { + if &cumulative_cost > changed_balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { From 6b35b059931bd94cb76f749a60db50ff4c8e1ab8 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Sat, 7 Dec 2024 09:30:56 +0400 Subject: [PATCH 14/70] feat: relax bounds for `EthPubSub` (#13203) Co-authored-by: Matthias Seitz --- crates/node/api/src/node.rs | 3 +- crates/node/builder/src/builder/states.rs | 9 +-- crates/node/builder/src/components/mod.rs | 5 +- crates/node/builder/src/rpc.rs | 41 ++++------- crates/optimism/node/src/node.rs | 6 +- crates/rpc/rpc-builder/src/lib.rs | 79 +++++++++------------- crates/rpc/rpc-eth-types/src/logs_utils.rs | 9 +-- crates/rpc/rpc/src/eth/pubsub.rs | 61 ++++------------- 8 files changed, 74 insertions(+), 139 deletions(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index fc6366a2eb5d..edb68a6589b0 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -61,7 +61,8 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Network: FullNetwork; /// Builds new blocks. - type PayloadBuilder: PayloadBuilder + Clone; + type PayloadBuilder: PayloadBuilder::Engine> + + Clone; /// Returns the transaction pool of the node. 
fn pool(&self) -> &Self::Pool; diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 16b7d668ca3c..fa12cc78b615 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -13,9 +13,7 @@ use crate::{ AddOns, FullNode, }; use reth_exex::ExExContext; -use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, PayloadBuilder, -}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB}; use reth_node_core::node_config::NodeConfig; use reth_tasks::TaskExecutor; use std::{fmt, future::Future}; @@ -88,10 +86,7 @@ impl> FullNodeTypes for NodeAdapter type Provider = T::Provider; } -impl> FullNodeComponents for NodeAdapter -where - C::PayloadBuilder: PayloadBuilder, -{ +impl> FullNodeComponents for NodeAdapter { type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index b643e2aa2a65..d62e74bda296 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -26,7 +26,7 @@ use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -52,7 +52,8 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Network: FullNetwork; /// Builds new blocks. - type PayloadBuilder: Clone; + type PayloadBuilder: PayloadBuilder::Engine> + + Clone; /// Returns the transaction pool of the node. 
fn pool(&self) -> &Self::Pool; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index a4010e52db3e..e6c9ad233568 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -10,8 +10,8 @@ use std::{ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, - PayloadBuilder, + AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, NodeTypes, + NodeTypesWithEngine, }; use reth_node_core::{ node_config::NodeConfig, @@ -19,7 +19,7 @@ use reth_node_core::{ }; use reth_payload_builder::PayloadStore; use reth_primitives::EthPrimitives; -use reth_provider::{providers::ProviderNodeTypes, BlockReader}; +use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, EthApi, @@ -33,7 +33,6 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use reth_transaction_pool::TransactionPool; use crate::EthApiBuilderCtx; @@ -404,18 +403,17 @@ where impl RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, - Pool: TransactionPool::Transaction>, - >, - EthApi: EthApiTypes - + FullEthApiServer< - Provider: BlockReader< + Types: ProviderNodeTypes< + Primitives: NodePrimitives< Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, >, - > + AddDevSigners + >, + >, + EthApi: EthApiTypes + + FullEthApiServer + + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, @@ -535,19 +533,10 @@ where impl NodeAddOns for RpcAddOns where - N: FullNodeComponents< - Types: ProviderNodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, - Pool: 
TransactionPool::Transaction>, - >, + N: FullNodeComponents>, EthApi: EthApiTypes - + FullEthApiServer< - Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - >, - > + AddDevSigners + + FullEthApiServer + + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index b7dcf2741c6d..35e33ccd75a4 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -12,9 +12,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; -use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, PayloadBuilder, TxTy, -}; +use reth_node_api::{AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, TxTy}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, @@ -241,7 +239,6 @@ impl NodeAddOns for OpAddOns where N: FullNodeComponents< Types: NodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -287,7 +284,6 @@ impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< Types: NodeTypes, - PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 949e377afb10..1220020504b0 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -208,7 +208,7 @@ use reth_primitives::NodePrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, - ReceiptProvider, 
StateProviderFactory, + StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, @@ -286,18 +286,19 @@ where Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm< + Header = ::BlockHeader, + Transaction = ::SignedTx, + >, EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, >, >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, BlockHeader = reth_primitives::Header, BlockBody = reth_primitives::BlockBody, >, @@ -647,16 +648,21 @@ impl RpcModuleBuilder where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider< + Block = ::Block, + Receipt = ::Receipt, + > + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm< + Header = ::BlockHeader, + Transaction = ::SignedTx, + >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, BlockHeader = reth_primitives::Header, BlockBody = reth_primitives::BlockBody, >, @@ -685,15 +691,11 @@ where EngineApi: EngineApiServer, EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, >, >, - Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - >, { let Self { provider, @@ -741,13 +743,16 @@ where /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; + /// use reth_primitives::TransactionSigned; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; /// - /// fn init + 'static>(evm: Evm) { + /// fn init + 'static>( + /// evm: Evm, + /// ) { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) /// .with_pool(NoopTransactionPool::default()) @@ -769,11 +774,6 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, - Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, - >, { let Self { provider, @@ -809,15 +809,11 @@ where where EthApi: FullEthApiServer< Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Receipt = ::Receipt, + Block = ::Block, + Header = ::BlockHeader, >, >, - Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - >, Pool: 
TransactionPool::Transaction>, { let mut modules = TransportRpcModules::default(); @@ -1155,8 +1151,7 @@ where RpcReceipt, RpcHeader, > + EthApiTypes, - BlockExecutor: - BlockExecutorProvider>, + BlockExecutor: BlockExecutorProvider, { /// Register Eth Namespace /// @@ -1190,17 +1185,8 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_debug(&mut self) -> &mut Self where - EthApi: EthApiSpec - + EthTransactions< - Provider: BlockReader< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - >, - > + TraceExt, - Provider: BlockReader< - Block = ::Block, - Receipt = ::Receipt, - >, + EthApi: EthApiSpec + EthTransactions + TraceExt, + BlockExecutor::Primitives: NodePrimitives>, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1303,8 +1289,7 @@ where pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, - BlockExecutor: - BlockExecutorProvider>, + BlockExecutor::Primitives: NodePrimitives>, { DebugApi::new( self.eth_api().clone(), @@ -1363,10 +1348,8 @@ where >, BlockExecutor: BlockExecutorProvider< Primitives: NodePrimitives< - Block = reth_primitives::Block, BlockHeader = reth_primitives::Header, BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, >, >, Consensus: reth_consensus::FullConsensus + Clone + 'static, diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 6078d32e894f..8b2dbaa54412 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -8,27 +8,28 @@ use alloy_primitives::TxHash; use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; use reth_primitives_traits::{BlockBody, SignedTransaction}; use 
reth_storage_api::{BlockReader, ProviderBlock}; use std::sync::Arc; /// Returns all matching of a block's receipts when the transaction hashes are known. -pub fn matching_block_logs_with_tx_hashes<'a, I>( +pub fn matching_block_logs_with_tx_hashes<'a, I, R>( filter: &FilteredParams, block_num_hash: BlockNumHash, tx_hashes_and_receipts: I, removed: bool, ) -> Vec where - I: IntoIterator, + I: IntoIterator, + R: TxReceipt + 'a, { let mut all_logs = Vec::new(); // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; // Iterate over transaction hashes and receipts and append matching logs. for (receipt_idx, (tx_hash, receipt)) in tx_hashes_and_receipts.into_iter().enumerate() { - for log in &receipt.logs { + for log in receipt.logs() { if log_matches_filter(block_num_hash, log, filter) { let log = Log { inner: log.clone(), diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 596af187635a..fc02b0da0671 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,13 +2,9 @@ use std::sync::Arc; -use alloy_eips::eip2718::Encodable2718; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ - pubsub::{ - Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, - SyncStatusMetadata, - }, + pubsub::{Params, PubSubSyncStatus, SubscriptionKind, SyncStatusMetadata}, FilteredParams, Header, Log, }; use futures::StreamExt; @@ -68,13 +64,7 @@ impl EthPubSub { #[async_trait::async_trait] impl EthPubSubApiServer> for EthPubSub where - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - > + Clone - + 'static, + Events: CanonStateSubscriptions + 'static, Eth: RpcNodeCore + EthApiTypes>> + 'static, @@ -104,23 +94,13 @@ async fn handle_accepted( params: Option, ) -> Result<(), ErrorObject<'static>> where - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - 
SignedTx: Encodable2718, - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - > + Clone - + 'static, + Events: CanonStateSubscriptions + 'static, Eth: RpcNodeCore + EthApiTypes>>, { match kind { SubscriptionKind::NewHeads => { - let stream = pubsub - .new_headers_stream() - .map(|header| EthSubscriptionResult::<()>::Header(Box::new(header.into()))); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.new_headers_stream()).await } SubscriptionKind::Logs => { // if no params are provided, used default filter params @@ -131,10 +111,7 @@ where } _ => FilteredParams::default(), }; - let stream = pubsub - .log_stream(filter) - .map(|log| EthSubscriptionResult::<()>::Log(Box::new(log))); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.log_stream(filter)).await } SubscriptionKind::NewPendingTransactions => { if let Some(params) = params { @@ -146,9 +123,7 @@ where tx.transaction.to_consensus(), pubsub.eth_api.tx_resp_builder(), ) { - Ok(tx) => { - Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) - } + Ok(tx) => Some(tx), Err(err) => { error!(target = "rpc", %err, @@ -172,10 +147,7 @@ where } } - let stream = pubsub - .pending_transaction_hashes_stream() - .map(EthSubscriptionResult::<()>::TransactionHash); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.pending_transaction_hashes_stream()).await } SubscriptionKind::Syncing => { // get new block subscription @@ -285,7 +257,7 @@ where Eth: RpcNodeCore, { /// Returns the current sync status for the `syncing` subscription - fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { + fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus { if is_syncing { let current_block = self .eth_api @@ -293,14 +265,14 @@ where .chain_info() .map(|info| info.best_number) .unwrap_or_default(); - 
EthSubscriptionResult::SyncState(PubSubSyncStatus::Detailed(SyncStatusMetadata { + PubSubSyncStatus::Detailed(SyncStatusMetadata { syncing: true, starting_block: 0, current_block, highest_block: Some(current_block), - })) + }) } else { - EthSubscriptionResult::SyncState(PubSubSyncStatus::Simple(false)) + PubSubSyncStatus::Simple(false) } } } @@ -324,15 +296,12 @@ where impl EthPubSubInner where - Events: CanonStateSubscriptions< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - Receipt = reth_primitives::Receipt, - >, - >, + Events: CanonStateSubscriptions, { /// Returns a stream that yields all new RPC blocks. - fn new_headers_stream(&self) -> impl Stream { + fn new_headers_stream( + &self, + ) -> impl Stream::BlockHeader>> { self.chain_events.canonical_state_stream().flat_map(|new_chain| { let headers = new_chain.committed().headers().collect::>(); futures::stream::iter( From 4fa86c54840919ab8b9c4361169394eaff2759c3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Sat, 7 Dec 2024 00:22:19 -0600 Subject: [PATCH 15/70] Add placeholder `OpHardfork::Isthmus` (#13112) Co-authored-by: Matthias Seitz --- crates/optimism/chainspec/src/lib.rs | 11 ++++++++++- crates/optimism/evm/src/config.rs | 8 ++++++-- crates/optimism/hardforks/src/hardfork.rs | 7 ++++++- crates/optimism/hardforks/src/lib.rs | 12 +++++++++--- 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 0ee86bc7d24b..907599fe2a29 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -36,7 +36,7 @@ use reth_chainspec::{ }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; -use reth_optimism_forks::OpHardforks; +use reth_optimism_forks::{OpHardfork, OpHardforks}; #[cfg(feature = "std")] pub(crate) use std::sync::LazyLock; @@ -166,6 +166,13 @@ impl OpChainSpecBuilder { self } + /// Enable 
Isthmus at genesis + pub fn isthmus_activated(mut self) -> Self { + self = self.holocene_activated(); + self.inner = self.inner.with_fork(OpHardfork::Isthmus, ForkCondition::Timestamp(0)); + self + } + /// Build the resulting [`OpChainSpec`]. /// /// # Panics @@ -414,6 +421,7 @@ impl From for OpChainSpec { (OpHardfork::Fjord.boxed(), genesis_info.fjord_time), (OpHardfork::Granite.boxed(), genesis_info.granite_time), (OpHardfork::Holocene.boxed(), genesis_info.holocene_time), + (OpHardfork::Isthmus.boxed(), genesis_info.isthmus_time), ]; let mut time_hardforks = time_hardfork_opts @@ -1030,6 +1038,7 @@ mod tests { OpHardfork::Fjord.boxed(), OpHardfork::Granite.boxed(), OpHardfork::Holocene.boxed(), + // OpHardfork::Isthmus.boxed(), ]; assert!(expected_hardforks diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 4a37860efc6b..b32b0929424d 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -12,7 +12,9 @@ pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { + if chain_spec.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) { + todo!() + } else if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { revm_primitives::HOLOCENE } else if chain_spec.fork(OpHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE @@ -31,7 +33,9 @@ pub fn revm_spec_by_timestamp_after_bedrock( /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). 
pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId { - if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) { + if chain_spec.fork(OpHardfork::Isthmus).active_at_head(block) { + todo!() + } else if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) { revm_primitives::HOLOCENE } else if chain_spec.fork(OpHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 661816ae5fe0..962d7bca4bcd 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -33,6 +33,8 @@ hardfork!( Granite, /// Holocene: Holocene, + /// Isthmus: + Isthmus, } ); @@ -159,6 +161,7 @@ impl OpHardfork { Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), Self::Holocene => Some(1732633200), + Self::Isthmus => todo!(), }, ) } @@ -194,6 +197,7 @@ impl OpHardfork { Self::Fjord => Some(1720627201), Self::Granite => Some(1726070401), Self::Holocene => None, + Self::Isthmus => todo!(), }, ) } @@ -357,7 +361,7 @@ mod tests { #[test] fn check_op_hardfork_from_str() { let hardfork_str = - ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE"]; + ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE", "isthMUS"]; let expected_hardforks = [ OpHardfork::Bedrock, OpHardfork::Regolith, @@ -366,6 +370,7 @@ mod tests { OpHardfork::Fjord, OpHardfork::Granite, OpHardfork::Holocene, + OpHardfork::Isthmus, ]; let hardforks: Vec = diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index bf6ca98ce4e9..36f42155e942 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -27,6 +27,12 @@ pub trait OpHardforks: EthereumHardforks { self.fork(OpHardfork::Bedrock).active_at_block(block_number) } + /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block + /// 
timestamp. + fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + } + /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) @@ -53,9 +59,9 @@ pub trait OpHardforks: EthereumHardforks { self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) } - /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block + /// Returns `true` if [`Isthmus`](OpHardfork::Isthmus) is active at given block /// timestamp. - fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + fn is_isthmus_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) } } From 42a1ba3a824f19c4df7c7a25f7af4372b5862152 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 7 Dec 2024 06:26:01 -0500 Subject: [PATCH 16/70] chore: make zip_blocks generic over header (#13199) --- crates/net/downloaders/src/bodies/test_utils.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index 781d1d93ba54..ca35c7449a00 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -10,10 +10,10 @@ use reth_network_p2p::bodies::response::BlockResponse; use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader}; use std::collections::HashMap; -pub(crate) fn zip_blocks<'a>( - headers: impl Iterator, - bodies: &mut HashMap, -) -> Vec { +pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>( + headers: impl Iterator>, + bodies: &mut HashMap, +) -> Vec> { headers .into_iter() .map(|header| { From 
410d361638170141c11fd91b6cf9ff399a54c48d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 13:17:11 +0100 Subject: [PATCH 17/70] chore: move calculate tx root to blockbody trait (#13209) --- crates/consensus/common/src/validation.rs | 5 ++--- crates/primitives-traits/src/block/body.rs | 7 ++++++- crates/primitives/src/traits.rs | 9 --------- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 2d681be449a5..2c38fa2d6cd4 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -7,9 +7,8 @@ use alloy_eips::{ }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{ - BlockBody, BlockBodyTxExt, EthereumHardfork, GotExpected, SealedBlock, SealedHeader, -}; +use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives_traits::BlockBody as _; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index b0fe1e3d0822..cec329990704 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -7,7 +7,7 @@ use crate::{ use alloc::{fmt, vec::Vec}; use alloy_consensus::Transaction; use alloy_eips::{eip2718::Encodable2718, eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; -use alloy_primitives::Bytes; +use alloy_primitives::{Bytes, B256}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullBlockBody: BlockBody {} @@ -44,6 +44,11 @@ pub trait BlockBody: /// Consume the block body and return a [`Vec`] of transactions. fn into_transactions(self) -> Vec; + /// Calculate the transaction root for the block body. 
+ fn calculate_tx_root(&self) -> B256 { + alloy_consensus::proofs::calculate_transaction_root(self.transactions()) + } + /// Returns block withdrawals if any. fn withdrawals(&self) -> Option<&Withdrawals>; diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs index 73eabd8ec986..3f009bba84bb 100644 --- a/crates/primitives/src/traits.rs +++ b/crates/primitives/src/traits.rs @@ -3,7 +3,6 @@ use crate::{ BlockWithSenders, SealedBlock, }; use alloc::vec::Vec; -use alloy_eips::eip2718::Encodable2718; use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; use revm_primitives::{Address, B256}; @@ -91,14 +90,6 @@ impl BlockExt for T {} /// Extension trait for [`BlockBody`] adding helper methods operating with transactions. pub trait BlockBodyTxExt: BlockBody { - /// Calculate the transaction root for the block body. - fn calculate_tx_root(&self) -> B256 - where - Self::Transaction: Encodable2718, - { - crate::proofs::calculate_transaction_root(self.transactions()) - } - /// Recover signer addresses for all transactions in the block body. 
fn recover_signers(&self) -> Option> where From 828ddbaca43406c33fda762b9d310e6897cfd703 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 7 Dec 2024 14:13:21 +0100 Subject: [PATCH 18/70] chore(engine): refactor code to transform EvmState into HashedPostState (#13207) --- crates/engine/tree/src/tree/root.rs | 89 ++++++++++++----------------- 1 file changed, 35 insertions(+), 54 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 2d00feba50d4..dc0563ade502 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -181,6 +181,38 @@ impl Drop for StateHookSender { } } +fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostState { + let mut hashed_state = HashedPostState::default(); + + for (address, account) in update { + if account.is_touched() { + let hashed_address = keccak256(address); + trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); + + let destroyed = account.is_selfdestructed(); + let info = if destroyed { None } else { Some(account.info.into()) }; + hashed_state.accounts.insert(hashed_address, info); + + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { + value.is_changed().then(|| (keccak256(B256::from(slot)), value.present_value)) + }) + .peekable(); + + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), + ); + } + } + } + + hashed_state +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. 
/// @@ -258,33 +290,7 @@ where proof_sequence_number: u64, state_root_message_sender: Sender, ) { - let mut hashed_state_update = HashedPostState::default(); - for (address, account) in update { - if account.is_touched() { - let hashed_address = keccak256(address); - trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update"); - - let destroyed = account.is_selfdestructed(); - let info = if destroyed { None } else { Some(account.info.into()) }; - hashed_state_update.accounts.insert(hashed_address, info); - - let mut changed_storage_iter = account - .storage - .into_iter() - .filter_map(|(slot, value)| { - value - .is_changed() - .then(|| (keccak256(B256::from(slot)), value.present_value)) - }) - .peekable(); - if destroyed || changed_storage_iter.peek().is_some() { - hashed_state_update.storages.insert( - hashed_address, - HashedStorage::from_iter(destroyed, changed_storage_iter), - ); - } - } - } + let hashed_state_update = evm_state_to_hashed_post_state(update); let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); for (address, slots) in &proof_targets { @@ -696,34 +702,9 @@ mod tests { } for update in &state_updates { - for (address, account) in update { - let hashed_address = keccak256(*address); - - if account.is_touched() { - let destroyed = account.is_selfdestructed(); - hashed_state.accounts.insert( - hashed_address, - if destroyed || account.is_empty() { - None - } else { - Some(account.info.clone().into()) - }, - ); - - if destroyed || !account.storage.is_empty() { - let storage = account - .storage - .iter() - .filter(|&(_slot, value)| (!destroyed && value.is_changed())) - .map(|(slot, value)| { - (keccak256(B256::from(*slot)), value.present_value) - }); - hashed_state - .storages - .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); - } - } + hashed_state.extend(evm_state_to_hashed_post_state(update.clone())); + for (address, account) in update { let storage: HashMap = 
account .storage .iter() From abaeb35fd17acd9705fc8b23ed52c8f4f40368f4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 14:29:42 +0100 Subject: [PATCH 19/70] chore: make reth-network-peers risc compatible (#13210) --- .github/assets/check_rv32imac.sh | 1 + Cargo.lock | 8 ++++---- Cargo.toml | 2 +- crates/net/peers/Cargo.toml | 9 +++++---- crates/net/peers/src/lib.rs | 3 ++- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh index ab1151bfb0ca..075ffb6dc40b 100755 --- a/.github/assets/check_rv32imac.sh +++ b/.github/assets/check_rv32imac.sh @@ -8,6 +8,7 @@ crates_to_check=( reth-ethereum-primitives reth-primitives-traits reth-optimism-forks + reth-network-peers # reth-evm # reth-primitives # reth-optimism-chainspec diff --git a/Cargo.lock b/Cargo.lock index fb3910d40e57..7ee2e9b6d478 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,9 +428,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f" +checksum = "f542548a609dca89fcd72b3b9f355928cf844d4363c5eed9c5273a3dd225e097" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -439,9 +439,9 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 650be8337b5e..142b00290b9a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -432,7 +432,7 @@ revm-primitives = { version = "14.0.0", default-features = false } alloy-chains = { version = "0.1.32", default-features = false } alloy-dyn-abi = "0.8.11" alloy-primitives = { 
version = "0.8.11", default-features = false } -alloy-rlp = { version = "0.3.4", default-features = false } +alloy-rlp = { version = "0.3.10", default-features = false } alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 4cfc0aee3d6f..9e7ccc3084de 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -15,8 +15,8 @@ workspace = true # eth alloy-primitives = { workspace = true, features = ["rlp"] } -alloy-rlp = { workspace = true, features = ["derive"] } -enr.workspace = true +alloy-rlp = { workspace = true, features = ["derive", "core-net", "core-error"] } +enr = { workspace = true, optional = true } # crypto @@ -32,6 +32,7 @@ alloy-primitives = { workspace = true, features = ["rand"] } rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } serde_json.workspace = true +enr.workspace = true tokio = { workspace = true, features = ["net", "macros", "rt"] } [features] @@ -42,7 +43,7 @@ std = [ "secp256k1?/std", "serde_with/std", "thiserror/std", - "url/std" + "url/std", ] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] -net = ["dep:tokio", "tokio?/net"] +net = ["std", "dep:tokio", "tokio?/net"] diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index 3e2777c2df89..a8bf51da2eef 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -64,6 +64,7 @@ use alloy_primitives::B512; use core::str::FromStr; // Re-export PeerId for ease of use. 
+#[cfg(feature = "secp256k1")] pub use enr::Enr; /// Alias for a peer identifier @@ -115,8 +116,8 @@ pub fn id2pk(id: PeerId) -> Result { pub enum AnyNode { /// An "enode:" peer with full ip NodeRecord(NodeRecord), - #[cfg(feature = "secp256k1")] /// An "enr:" peer + #[cfg(feature = "secp256k1")] Enr(Enr), /// An incomplete "enode" with only a peer id PeerId(PeerId), From 2846dd242e413f7102c96c39334d8a99f5b393a0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 20:19:43 +0100 Subject: [PATCH 20/70] chore: flip tx conversion impl (#13208) --- crates/primitives/src/transaction/mod.rs | 28 ++++++++++++++++++ crates/primitives/src/transaction/pooled.rs | 32 ++------------------- crates/transaction-pool/src/traits.rs | 3 +- 3 files changed, 32 insertions(+), 31 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index f7211489e93a..670ee7f352ef 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -846,6 +846,34 @@ impl TransactionSigned { &self.transaction } + /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. + /// + /// This function used as a helper to convert from a decoded p2p broadcast message to + /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on + /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. + pub fn try_into_pooled(self) -> Result { + let hash = self.hash(); + match self { + Self { transaction: Transaction::Legacy(tx), signature, .. } => { + Ok(PooledTransactionsElement::Legacy(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip2930(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip2930(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip1559(tx), signature, .. 
} => { + Ok(PooledTransactionsElement::Eip1559(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip7702(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip7702(Signed::new_unchecked(tx, signature, hash))) + } + // Not supported because missing blob sidecar + tx @ Self { transaction: Transaction::Eip4844(_), .. } => Err(tx), + #[cfg(feature = "optimism")] + // Not supported because deposit transactions are never pooled + tx @ Self { transaction: Transaction::Deposit(_), .. } => Err(tx), + } + } + /// Transaction hash. Used to identify transaction. pub fn hash(&self) -> TxHash { *self.tx_hash() diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 93a3c1823224..eea10d44c9f8 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -46,34 +46,6 @@ pub enum PooledTransactionsElement { } impl PooledTransactionsElement { - /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. - /// - /// This function used as a helper to convert from a decoded p2p broadcast message to - /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on - /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. - pub fn try_from_broadcast(tx: TransactionSigned) -> Result { - let hash = tx.hash(); - match tx { - TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. } => { - Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) - } - TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. } => { - Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) - } - TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { - Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) - } - TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. 
} => { - Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) - } - // Not supported because missing blob sidecar - tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), - #[cfg(feature = "optimism")] - // Not supported because deposit transactions are never pooled - tx @ TransactionSigned { transaction: Transaction::Deposit(_), .. } => Err(tx), - } - } - /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// @@ -650,7 +622,7 @@ impl TryFrom for PooledTransactionsElement { type Error = TransactionConversionError; fn try_from(tx: TransactionSigned) -> Result { - Self::try_from_broadcast(tx).map_err(|_| TransactionConversionError::UnsupportedForP2P) + tx.try_into_pooled().map_err(|_| TransactionConversionError::UnsupportedForP2P) } } @@ -679,7 +651,7 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { // Attempt to create a `TransactionSigned` with arbitrary data. let tx_signed = TransactionSigned::arbitrary(u)?; // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result. 
- match Self::try_from_broadcast(tx_signed) { + match tx_signed.try_into_pooled() { Ok(tx) => Ok(tx), Err(tx) => { let (tx, sig, hash) = tx.into_parts(); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2859d71b9d11..a0d4d40983e4 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1267,7 +1267,8 @@ impl PoolTransaction for EthPooledTransaction { tx: RecoveredTx, ) -> Result, Self::TryFromConsensusError> { let (tx, signer) = tx.to_components(); - let pooled = PooledTransactionsElement::try_from_broadcast(tx) + let pooled = tx + .try_into_pooled() .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; Ok(RecoveredTx::from_signed_transaction(pooled, signer)) } From 08b875f4f5bc5930557b188ef1f23b82020b17c5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 7 Dec 2024 22:15:32 +0100 Subject: [PATCH 21/70] chore: feature gate reth-codecs in trie-common (#13215) --- crates/stages/types/Cargo.toml | 1 + crates/trie/common/Cargo.toml | 16 ++++++++++++---- crates/trie/common/src/hash_builder/state.rs | 8 +++++--- crates/trie/common/src/nibbles.rs | 12 +++++++----- crates/trie/common/src/storage.rs | 4 ++-- crates/trie/common/src/subnode.rs | 8 +++++--- 6 files changed, 32 insertions(+), 17 deletions(-) diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index d8ab63552571..0243415942b5 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -38,6 +38,7 @@ reth-codec = [ "dep:reth-codecs", "dep:bytes", "dep:modular-bitfield", + "reth-trie-common/reth-codec" ] test-utils = [ "dep:arbitrary", diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 4f6a927d4344..eadbb3176b55 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -18,14 +18,14 @@ alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-trie.workspace = true alloy-consensus.workspace = true 
reth-primitives-traits.workspace = true -reth-codecs.workspace = true +reth-codecs = { workspace = true, optional = true } revm-primitives.workspace = true alloy-genesis.workspace = true alloy-rpc-types-eth = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } -bytes.workspace = true +bytes = { workspace = true, optional = true } derive_more.workspace = true itertools.workspace = true nybbles = { workspace = true, features = ["rlp"] } @@ -42,8 +42,11 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-codecs.workspace = true + alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } +bytes.workspace = true hash-db = "=0.15.2" plain_hasher = "0.2" arbitrary = { workspace = true, features = ["derive"] } @@ -62,7 +65,7 @@ eip1186 = [ ] serde = [ "dep:serde", - "bytes/serde", + "bytes?/serde", "nybbles/serde", "alloy-primitives/serde", "alloy-consensus/serde", @@ -70,7 +73,11 @@ serde = [ "alloy-rpc-types-eth?/serde", "revm-primitives/serde", "reth-primitives-traits/serde", - "reth-codecs/serde" + "reth-codecs?/serde" +] +reth-codec = [ + "dep:reth-codecs", + "dep:bytes", ] serde-bincode-compat = [ "serde", @@ -86,6 +93,7 @@ test-utils = [ "reth-codecs/test-utils", ] arbitrary = [ + "dep:reth-codecs", "alloy-trie/arbitrary", "dep:arbitrary", "alloy-serde?/arbitrary", diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index ec6b102d44ec..4bf3bade3986 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -1,8 +1,6 @@ use crate::TrieMask; use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder}; -use bytes::Buf; use nybbles::Nibbles; -use reth_codecs::Compact; /// The hash builder state for storing in the 
database. /// Check the `reth-trie` crate for more info on hash builder. @@ -63,7 +61,8 @@ impl From for HashBuilderState { } } -impl Compact for HashBuilderState { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for HashBuilderState { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -106,6 +105,8 @@ impl Compact for HashBuilderState { } fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + let (key, mut buf) = Vec::from_compact(buf, 0); let stack_len = buf.get_u16() as usize; @@ -150,6 +151,7 @@ impl Compact for HashBuilderState { #[cfg(test)] mod tests { use super::*; + use reth_codecs::Compact; #[test] fn hash_builder_state_regression() { diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index 2d4e34b3e3bf..b1cc2f10c56f 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -1,7 +1,4 @@ -use bytes::Buf; use derive_more::Deref; -use reth_codecs::Compact; - pub use nybbles::Nibbles; /// The representation of nibbles of the merkle trie stored in the database. 
@@ -45,7 +42,8 @@ impl core::borrow::Borrow<[u8]> for StoredNibbles { } } -impl Compact for StoredNibbles { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StoredNibbles { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -55,6 +53,8 @@ impl Compact for StoredNibbles { } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { + use bytes::Buf; + let nibbles = &buf[..len]; buf.advance(len); (Self(Nibbles::from_nibbles_unchecked(nibbles)), buf) @@ -88,7 +88,8 @@ impl From for Nibbles { } } -impl Compact for StoredNibblesSubKey { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StoredNibblesSubKey { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -114,6 +115,7 @@ impl Compact for StoredNibblesSubKey { mod tests { use super::*; use bytes::BytesMut; + use reth_codecs::Compact; #[test] fn test_stored_nibbles_from_nibbles() { diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index cf2945d9101a..3ebcc4e810e4 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,5 +1,4 @@ use super::{BranchNodeCompact, StoredNibblesSubKey}; -use reth_codecs::Compact; /// Account storage trie node. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -14,7 +13,8 @@ pub struct StorageTrieEntry { // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. 
If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey -impl Compact for StorageTrieEntry { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageTrieEntry { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, diff --git a/crates/trie/common/src/subnode.rs b/crates/trie/common/src/subnode.rs index c64b2317cf30..de65a7887806 100644 --- a/crates/trie/common/src/subnode.rs +++ b/crates/trie/common/src/subnode.rs @@ -1,6 +1,4 @@ use super::BranchNodeCompact; -use bytes::Buf; -use reth_codecs::Compact; /// Walker sub node for storing intermediate state root calculation state in the database. #[derive(Debug, Clone, PartialEq, Eq, Default)] @@ -13,7 +11,8 @@ pub struct StoredSubNode { pub node: Option, } -impl Compact for StoredSubNode { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StoredSubNode { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -46,6 +45,8 @@ impl Compact for StoredSubNode { } fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + let key_len = buf.get_u16() as usize; let key = Vec::from(&buf[..key_len]); buf.advance(key_len); @@ -69,6 +70,7 @@ mod tests { use super::*; use crate::TrieMask; use alloy_primitives::B256; + use reth_codecs::Compact; #[test] fn subnode_roundtrip() { From 73785ccf0517db5243040e5e70762f7b6aef831a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 8 Dec 2024 12:27:31 +0000 Subject: [PATCH 22/70] chore(deps): weekly `cargo update` (#13216) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 326 ++++++++++++++++++++++++++--------------------------- 1 file changed, 161 insertions(+), 165 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ee2e9b6d478..0dfce7a28091 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -100,15 +100,15 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.47" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18c5c520273946ecf715c0010b4e3503d7eba9893cd9ce6b7fff5654c4a3c470" +checksum = "a0161082e0edd9013d23083465cc04b20e44b7a15646d36ba7b0cdb7cd6fe18f" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08234c0eece0e08602db5095a16dc942cad91967cccfcfc2c6a42c25563964f" +checksum = "fa60357dda9a3d0f738f18844bd6d0f4a5924cc5cf00bfad2ff1369897966123" dependencies = [ "alloy-consensus", "alloy-eips", @@ -170,7 +170,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -275,7 +275,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", ] @@ -301,14 +301,14 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] name = "alloy-network-primitives" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18abfc73ce48f074c8bc6e05c1f08ef0b1ddc9b04f191a821d0beb9470a42a29" +checksum = "c20219d1ad261da7a6331c16367214ee7ded41d001fabbbd656fbf71898b2773" dependencies = [ "alloy-consensus", "alloy-eips", @@ -329,7 +329,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", "url", ] @@ -351,7 
+351,7 @@ dependencies = [ "getrandom 0.2.15", "hashbrown 0.15.2", "hex-literal", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "k256", "keccak-asm", @@ -400,7 +400,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", "url", @@ -512,9 +512,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abca110e59f760259e26d0c84912121468008aba48dd227af0f306cfd7bce9ae" +checksum = "200661999b6e235d9840be5d60a6e8ae2f0af9eb2a256dd378786744660e36ec" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -533,7 +533,7 @@ dependencies = [ "alloy-serde", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -614,7 +614,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -652,7 +652,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -670,7 +670,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -696,7 +696,7 @@ dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.6.0", + "indexmap 2.7.0", "proc-macro-error2", "proc-macro2", "quote", @@ -755,7 +755,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tower 0.5.1", "tracing", @@ -907,9 +907,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "aquamarine" @@ -1395,7 +1395,7 @@ dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - 
"indexmap 2.6.0", + "indexmap 2.7.0", "num-bigint", "rustc-hash 2.1.0", ] @@ -1421,7 +1421,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.6.0", + "indexmap 2.7.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1467,7 +1467,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.6.0", + "indexmap 2.7.0", "once_cell", "phf", "rustc-hash 2.1.0", @@ -1692,9 +1692,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -1793,9 +1793,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -1803,9 +1803,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -1827,9 +1827,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "coins-bip32" @@ -1975,9 +1975,9 @@ checksum = 
"c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" dependencies = [ "const_format_proc_macros", "konst", @@ -1985,9 +1985,9 @@ dependencies = [ [[package]] name = "const_format_proc_macros" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ "proc-macro2", "quote", @@ -2654,7 +2654,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "walkdir", ] @@ -2783,12 +2783,11 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfbba28f4f3f32d92c06a64f5bf6c4537b5d4e21f28c689bd2bbaecfea4e0d3e" +checksum = "036c84bd29bff35e29bbee3c8fc0e2fb95db12b6f2f3cae82a827fbc97256f3a" dependencies = [ "alloy-primitives", - "derivative", "ethereum_serde_utils", "itertools 0.13.0", "serde", @@ -2799,9 +2798,9 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d37845ba7c16bf4be8be4b5786f03a2ba5f2fda0d7f9e7cb2282f69cff420d7" +checksum = "9dc8e67e1f770f5aa4c2c2069aaaf9daee7ac21bed357a71b911b37a58966cfb" dependencies = [ "darling", "proc-macro2", @@ -2830,7 +2829,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -2918,7 +2917,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 2.0.3", 
+ "thiserror 2.0.5", "tokio", ] @@ -3420,9 +3419,9 @@ checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "generator" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb949699c3e4df3a183b1d2142cb24277057055ed23c68ed58894f76c517223" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" dependencies = [ "cfg-if", "libc", @@ -3558,7 +3557,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -3719,9 +3718,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -4153,9 +4152,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "arbitrary", "equivalent", @@ -4182,7 +4181,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.7.0", "is-terminal", "itoa", "log", @@ -4370,9 +4369,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ "once_cell", "wasm-bindgen", @@ -4911,7 +4910,7 
@@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "metrics-util", "quanta", @@ -4943,7 +4942,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.2", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "ordered-float", "quanta", @@ -5110,9 +5109,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", "unsigned-varint", @@ -5362,7 +5361,7 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -5377,7 +5376,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -5414,7 +5413,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-genesis", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", "unsigned-varint", ] @@ -5468,7 +5467,7 @@ dependencies = [ "op-alloy-protocol", "serde", "snap", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -5548,9 +5547,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.0" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arbitrary", "arrayvec", @@ -5559,20 +5558,19 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec-derive", - "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.7.0" +version = "3.6.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 1.0.109", ] [[package]] @@ -5638,12 +5636,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.5", "ucd-trie", ] @@ -6093,7 +6091,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -6112,7 +6110,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.5", "tinyvec", "tracing", "web-time", @@ -6570,7 +6568,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -6606,7 +6604,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tower 0.4.13", "tracing", @@ -6660,7 +6658,7 @@ dependencies = [ "reth-execution-errors", "reth-primitives", "reth-storage-errors", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -6816,7 +6814,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tikv-jemallocator", "tracy-client", ] @@ -6962,7 +6960,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7019,7 +7017,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", 
"tracing", ] @@ -7061,7 +7059,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -7086,7 +7084,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -7112,7 +7110,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -7151,7 +7149,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7228,7 +7226,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7284,7 +7282,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", ] @@ -7311,7 +7309,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", ] @@ -7366,7 +7364,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -7412,7 +7410,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7445,7 +7443,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7474,7 +7472,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7540,7 +7538,7 @@ dependencies = [ "proptest-derive", "rustc-hash 2.1.0", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7645,7 +7643,7 @@ dependencies = [ "reth-prune-types", "reth-storage-errors", "revm-primitives", - "thiserror 2.0.3", + "thiserror 
2.0.5", ] [[package]] @@ -7740,7 +7738,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", ] @@ -7767,7 +7765,7 @@ version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -7810,7 +7808,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7827,7 +7825,7 @@ dependencies = [ "criterion", "dashmap 6.1.0", "derive_more", - "indexmap 2.6.0", + "indexmap 2.7.0", "parking_lot", "pprof", "rand 0.8.5", @@ -7835,7 +7833,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", ] @@ -7874,7 +7872,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -7934,7 +7932,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7959,7 +7957,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", ] @@ -7997,7 +7995,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "url", ] @@ -8028,7 +8026,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", "zstd", ] @@ -8162,7 +8160,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "toml", "tracing", @@ -8492,7 +8490,7 @@ dependencies = [ "reth-transaction-pool", "revm", "sha2 0.10.8", - "thiserror 2.0.3", + "thiserror 2.0.5", "tracing", ] @@ -8560,7 +8558,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -8625,7 +8623,7 @@ dependencies = [ "reth-primitives", 
"revm-primitives", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", ] @@ -8800,7 +8798,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.0", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -8821,7 +8819,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.3", + "thiserror 2.0.5", "toml", ] @@ -8907,7 +8905,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tower 0.4.13", @@ -9001,7 +8999,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-util", "tower 0.4.13", @@ -9042,7 +9040,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -9127,7 +9125,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9228,7 +9226,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -9255,7 +9253,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9361,7 +9359,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", "tracing-futures", @@ -9445,7 +9443,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9561,7 +9559,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.3", + "thiserror 2.0.5", "tokio", "tracing", ] @@ -9587,7 +9585,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.3", + "thiserror 2.0.5", ] [[package]] @@ -9607,9 +9605,9 @@ dependencies = [ [[package]] name = 
"revm-inspectors" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41bbeb6004cc4ed48d27756f0479011df91a6f5642a3abab9309eda5ce67c4ad" +checksum = "0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9766,9 +9764,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81dc953b2244ddd5e7860cb0bb2a790494b898ef321d4aff8e260efab60cc88" +checksum = "395b0c39c00f9296f3937624c1fa4e0ee44f8c0e4b2c49408179ef381c6c2e6e" dependencies = [ "bytemuck", "byteorder", @@ -10226,7 +10224,7 @@ version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -10286,7 +10284,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -10530,9 +10528,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -10821,11 +10819,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "643caef17e3128658ff44d85923ef2d28af81bb71e0d67bbfe1d76f19a73e053" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.5", ] [[package]] @@ -10841,9 +10839,9 @@ dependencies = [ [[package]] name = 
"thiserror-impl" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "995d0bbc9995d1f19d28b7215a9352b0fc3cd3a2d2ec95c2cadc485cdedbcdde" dependencies = [ "proc-macro2", "quote", @@ -10902,9 +10900,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -10926,9 +10924,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -10980,9 +10978,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -11009,20 +11007,19 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version 
= "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -11048,9 +11045,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -11088,7 +11085,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -11295,9 +11292,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.17.4" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" +checksum = "51e295eae54124872df35720dc3a5b1e827c7deee352b342ec7f7e626d0d0ef3" dependencies = [ "loom", "once_cell", @@ -11654,9 +11651,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -11665,13 +11662,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn 2.0.90", @@ -11680,9 +11676,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.47" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", @@ -11693,9 +11689,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11703,9 +11699,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -11716,9 +11712,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-streams" @@ -11749,9 +11745,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", From 465692b5aff5718f10725077a9caf0a7b7c55297 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 10:57:50 +0100 Subject: [PATCH 23/70] test: add tracing test (#13221) --- crates/rpc/rpc-testing-util/tests/it/trace.rs | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index e67946f7b0a1..47932bd7302c 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -126,3 +126,47 @@ async fn debug_trace_block_entire_chain() { } println!("Traced all blocks in {:?}", now.elapsed()); } + +/// This is intended to be run locally against a running node. This traces all blocks for a given +/// chain. +/// +/// This is a noop of env var `RETH_RPC_TEST_NODE_URL` is not set. 
+#[tokio::test(flavor = "multi_thread")] +async fn debug_trace_block_opcodes_entire_chain() { + let opcodes7702 = ["EXTCODESIZE", "EXTCODECOPY", "EXTCODEHASH"]; + let url = parse_env_url("RETH_RPC_TEST_NODE_URL"); + if url.is_err() { + return + } + let url = url.unwrap(); + + let client = HttpClientBuilder::default().build(url).unwrap(); + let current_block: u64 = + >::block_number(&client) + .await + .unwrap() + .try_into() + .unwrap(); + let range = 0..=current_block; + println!("Tracing blocks {range:?} for opcodes"); + let mut stream = client.trace_block_opcode_gas_unordered(range, 2).enumerate(); + let now = Instant::now(); + while let Some((num, next)) = stream.next().await { + match next { + Ok((block_opcodes, block)) => { + for opcode in opcodes7702 { + if block_opcodes.contains(opcode) { + eprintln!("Found opcode {opcode}: in {block}"); + } + } + } + Err((err, block)) => { + eprintln!("Error tracing block {block:?}: {err}"); + } + }; + if num % 10000 == 0 { + println!("Traced {num} blocks"); + } + } + println!("Traced all blocks in {:?}", now.elapsed()); +} From f7a3476046a0fd5d8063a6a191c661c1cf0fc28c Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 9 Dec 2024 11:40:43 +0100 Subject: [PATCH 24/70] chore(engine): simplify StateRootTask creation and hook management (#13213) --- crates/engine/tree/src/tree/root.rs | 31 +++++++++++++++++------------ 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index dc0563ade502..ae22b036b65f 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,6 +1,7 @@ //! State root task related functionality. 
use alloy_primitives::map::{HashMap, HashSet}; +use reth_evm::system_calls::OnStateHook; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, StateCommitmentProvider, @@ -20,7 +21,7 @@ use std::{ collections::BTreeMap, ops::Deref, sync::{ - mpsc::{self, Receiver, Sender}, + mpsc::{self, channel, Receiver, Sender}, Arc, }, time::{Duration, Instant}, @@ -249,11 +250,9 @@ where + 'static, { /// Creates a new state root task with the unified message channel - pub(crate) fn new( - config: StateRootConfig, - tx: Sender, - rx: Receiver, - ) -> Self { + pub(crate) fn new(config: StateRootConfig) -> Self { + let (tx, rx) = channel(); + Self { config, rx, @@ -279,6 +278,15 @@ where StateRootHandle::new(rx) } + /// Returns a state hook to be used to send state updates to this task. + pub(crate) fn state_hook(&self) -> impl OnStateHook { + let state_hook = StateHookSender::new(self.tx.clone()); + + move |state: &EvmState| { + let _ = state_hook.send(StateRootMessage::StateUpdate(state.clone())); + } + } + /// Handles state updates. /// /// Returns proof targets derived from the state update. 
@@ -670,7 +678,6 @@ mod tests { reth_tracing::init_test_tracing(); let factory = create_test_provider_factory(); - let (tx, rx) = std::sync::mpsc::channel(); let state_updates = create_mock_state_updates(10, 10); let mut hashed_state = HashedPostState::default(); @@ -721,16 +728,14 @@ mod tests { consistent_view: ConsistentDbView::new(factory, None), input: Arc::new(TrieInput::from_state(hashed_state)), }; - let task = StateRootTask::new(config, tx.clone(), rx); + let task = StateRootTask::new(config); + let mut state_hook = task.state_hook(); let handle = task.spawn(); - let state_hook_sender = StateHookSender::new(tx); for update in state_updates { - state_hook_sender - .send(StateRootMessage::StateUpdate(update)) - .expect("failed to send state"); + state_hook.on_state(&update); } - drop(state_hook_sender); + drop(state_hook); let (root_from_task, _) = handle.wait_for_result().expect("task failed"); let root_from_base = state_root(accumulated_state); From d68d7c8da0f13acfa356284c4ecf54047f964c25 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 06:15:41 -0500 Subject: [PATCH 25/70] feat: bound NetworkPrimitives types by proper traits (#13196) --- crates/net/eth-wire-types/src/primitives.rs | 43 ++------------------- 1 file changed, 4 insertions(+), 39 deletions(-) diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs index 78083e9e0928..17f1943186a8 100644 --- a/crates/net/eth-wire-types/src/primitives.rs +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -1,7 +1,7 @@ //! Abstraction over primitive types in network messages. use alloy_rlp::{Decodable, Encodable}; -use reth_primitives_traits::{Block, BlockHeader, SignedTransaction}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SignedTransaction}; use std::fmt::Debug; /// Abstraction over primitive types which might appear in network messages. 
See @@ -10,56 +10,21 @@ pub trait NetworkPrimitives: Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static { /// The block header type. - type BlockHeader: BlockHeader - + Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type BlockHeader: BlockHeader + 'static; /// The block body type. - type BlockBody: Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type BlockBody: BlockBody + 'static; /// Full block type. type Block: Block
+ Encodable + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq + 'static; /// The transaction type which peers announce in `Transactions` messages. It is different from /// `PooledTransactions` to account for Ethereum case where EIP-4844 transactions are not being /// announced and can only be explicitly requested from peers. - type BroadcastedTransaction: Encodable - + Decodable - + Send - + Sync - + Unpin - + Clone - + Debug - + PartialEq - + Eq - + 'static; + type BroadcastedTransaction: SignedTransaction + 'static; /// The transaction type which peers return in `PooledTransactions` messages. type PooledTransaction: SignedTransaction + TryFrom + 'static; From 13302ca655d3d21422be683881a4248499f0f1d2 Mon Sep 17 00:00:00 2001 From: Alessandro Mazza <121622391+alessandromazza98@users.noreply.github.com> Date: Mon, 9 Dec 2024 13:09:54 +0100 Subject: [PATCH 26/70] feat(db): make init_db function accepts a TableSet (#13222) --- crates/storage/db/src/mdbx.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index d6947e10bd2b..c0e11079f3ae 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -1,6 +1,6 @@ //! Bindings for [MDBX](https://libmdbx.dqdkfa.ru/). -use crate::is_database_empty; +use crate::{is_database_empty, TableSet, Tables}; use eyre::Context; use std::path::Path; @@ -28,12 +28,21 @@ pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Resu Ok(DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args)?) } -/// Opens up an existing database or creates a new one at the specified path. Creates tables if -/// necessary. Read/Write mode. +/// Opens up an existing database or creates a new one at the specified path. Creates tables defined +/// in [`Tables`] if necessary. Read/Write mode. 
pub fn init_db>(path: P, args: DatabaseArguments) -> eyre::Result { + init_db_for::(path, args) +} + +/// Opens up an existing database or creates a new one at the specified path. Creates tables defined +/// in the given [`TableSet`] if necessary. Read/Write mode. +pub fn init_db_for, TS: TableSet>( + path: P, + args: DatabaseArguments, +) -> eyre::Result { let client_version = args.client_version().clone(); let db = create_db(path, args)?; - db.create_tables()?; + db.create_tables_for::()?; db.record_client_version(client_version)?; Ok(db) } From 233f893a932c2ae1d19266fbdd90838c569609c6 Mon Sep 17 00:00:00 2001 From: greg <82421016+greged93@users.noreply.github.com> Date: Mon, 9 Dec 2024 15:14:45 +0100 Subject: [PATCH 27/70] fix: deny advisory RUSTSEC-2024-0421 (#13227) Signed-off-by: Gregory Edison --- Cargo.lock | 170 +++++++++++++++++---------------- crates/net/dns/Cargo.toml | 4 +- crates/net/dns/src/resolver.rs | 18 ++-- 3 files changed, 101 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0dfce7a28091..fb49d13e8724 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1112,6 +1112,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "async-sse" version = "5.1.0" @@ -3666,6 +3677,54 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "hickory-proto" +version = "0.25.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", 
+ "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.8.5", + "serde", + "thiserror 2.0.5", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.5", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.4" @@ -4053,16 +4112,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -4818,15 +4867,6 @@ dependencies = [ "hashbrown 0.15.2", ] -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "lz4_flex" version = "0.11.3" @@ -5071,6 +5111,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "moka" +version = "0.12.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "triomphe", + "uuid", +] + [[package]] name = 
"more-asserts" version = "0.3.1" @@ -7098,6 +7158,7 @@ dependencies = [ "alloy-rlp", "data-encoding", "enr", + "hickory-resolver", "linked_hash_set", "parking_lot", "rand 0.8.5", @@ -7114,7 +7175,6 @@ dependencies = [ "tokio", "tokio-stream", "tracing", - "trust-dns-resolver", ] [[package]] @@ -10725,6 +10785,12 @@ dependencies = [ "windows 0.57.0", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -11323,51 +11389,10 @@ dependencies = [ ] [[package]] -name = "trust-dns-proto" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand 0.8.5", - "smallvec", - "thiserror 1.0.69", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.2" +name = "triomphe" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot", - "rand 0.8.5", - "resolv-conf", - "serde", - "smallvec", - "thiserror 1.0.69", - "tokio", - "tracing", - "trust-dns-proto", -] +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" [[package]] name = "try-lock" @@ -11443,27 +11468,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-segmentation" version = "1.12.0" @@ -11528,7 +11538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", "serde", ] diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 2f71354a7dd8..9e3e93d12f84 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -27,7 +27,7 @@ tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true # trust-dns -trust-dns-resolver = "0.23" +hickory-resolver = { version = "0.25.0-alpha.4" } # misc data-encoding = "2" @@ -58,6 +58,6 @@ serde = [ "parking_lot/serde", "rand/serde", "secp256k1/serde", - "trust-dns-resolver/serde", + "hickory-resolver/serde", "reth-ethereum-forks/serde" ] diff --git a/crates/net/dns/src/resolver.rs b/crates/net/dns/src/resolver.rs index 42c444f89a75..255f2ad4a102 100644 --- a/crates/net/dns/src/resolver.rs +++ b/crates/net/dns/src/resolver.rs @@ -1,10 +1,10 @@ //! 
Perform DNS lookups +use hickory_resolver::name_server::ConnectionProvider; +pub use hickory_resolver::{ResolveError, TokioResolver}; use parking_lot::RwLock; use std::{collections::HashMap, future::Future}; use tracing::trace; -pub use trust_dns_resolver::{error::ResolveError, TokioAsyncResolver}; -use trust_dns_resolver::{name_server::ConnectionProvider, AsyncResolver}; /// A type that can lookup DNS entries pub trait Resolver: Send + Sync + Unpin + 'static { @@ -12,7 +12,7 @@ pub trait Resolver: Send + Sync + Unpin + 'static { fn lookup_txt(&self, query: &str) -> impl Future> + Send; } -impl Resolver for AsyncResolver

{ +impl Resolver for hickory_resolver::Resolver

{ async fn lookup_txt(&self, query: &str) -> Option { // See: [AsyncResolver::txt_lookup] // > *hint* queries that end with a '.' are fully qualified names and are cheaper lookups @@ -33,7 +33,7 @@ impl Resolver for AsyncResolver

{ /// An asynchronous DNS resolver /// -/// See also [`TokioAsyncResolver`] +/// See also [`TokioResolver`] /// /// ``` /// # fn t() { @@ -43,16 +43,16 @@ impl Resolver for AsyncResolver

{ /// ``` /// /// Note: This [Resolver] can send multiple lookup attempts, See also -/// [`ResolverOpts`](trust_dns_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) +/// [`ResolverOpts`](hickory_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) /// by default. #[derive(Clone, Debug)] -pub struct DnsResolver(TokioAsyncResolver); +pub struct DnsResolver(TokioResolver); // === impl DnsResolver === impl DnsResolver { - /// Create a new resolver by wrapping the given [`AsyncResolver`] - pub const fn new(resolver: TokioAsyncResolver) -> Self { + /// Create a new resolver by wrapping the given [`TokioResolver`]. + pub const fn new(resolver: TokioResolver) -> Self { Self(resolver) } @@ -60,7 +60,7 @@ impl DnsResolver { /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. pub fn from_system_conf() -> Result { - TokioAsyncResolver::tokio_from_system_conf().map(Self::new) + TokioResolver::tokio_from_system_conf().map(Self::new) } } From 3e859058cdc42c7f380b3f5a61e6f40eec3d1741 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 15:59:18 +0100 Subject: [PATCH 28/70] chore: add default impls for withdrawals and ommers root (#13229) --- crates/primitives-traits/src/block/body.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index cec329990704..14436ee01c58 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,7 +1,7 @@ //! Block body abstraction. use crate::{ - FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, + BlockHeader, FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat, SignedTransaction, }; use alloc::{fmt, vec::Vec}; @@ -36,7 +36,7 @@ pub trait BlockBody: type Transaction: SignedTransaction; /// Ommer header type. 
- type OmmerHeader; + type OmmerHeader: BlockHeader; /// Returns reference to transactions in block. fn transactions(&self) -> &[Self::Transaction]; @@ -52,9 +52,25 @@ pub trait BlockBody: /// Returns block withdrawals if any. fn withdrawals(&self) -> Option<&Withdrawals>; + /// Calculate the withdrawals root for the block body. + /// + /// Returns `None` if there are no withdrawals in the block. + fn calculate_withdrawals_root(&self) -> Option { + self.withdrawals().map(|withdrawals| { + alloy_consensus::proofs::calculate_withdrawals_root(withdrawals.as_slice()) + }) + } + /// Returns block ommers if any. fn ommers(&self) -> Option<&[Self::OmmerHeader]>; + /// Calculate the ommers root for the block body. + /// + /// Returns `None` if there are no ommers in the block. + fn calculate_ommers_root(&self) -> Option { + self.ommers().map(alloy_consensus::proofs::calculate_ommers_root) + } + /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. fn blob_gas_used(&self) -> u64 { // TODO(mattss): simplify after From 49d3d82b64a9a0f7dd2ee46dc7856e2d383b3fa5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 15:59:34 +0100 Subject: [PATCH 29/70] chore: use BlockHeader trait (#13224) --- crates/transaction-pool/src/validate/eth.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index e3b7af736cd8..998de5ffb510 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -11,9 +11,12 @@ use crate::{ EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; -use alloy_consensus::constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, +use alloy_consensus::{ + constants::{ + EIP1559_TX_TYPE_ID, 
EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, + }, + BlockHeader, }; use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; use reth_chainspec::{ChainSpec, EthereumHardforks}; @@ -102,7 +105,7 @@ where } fn on_new_head_block(&self, new_tip_block: &SealedBlock) { - self.inner.on_new_head_block(new_tip_block) + self.inner.on_new_head_block(new_tip_block.header()) } } @@ -469,17 +472,17 @@ where } } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + fn on_new_head_block(&self, new_tip_block: &T) { // update all forks - if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.prague.store(true, std::sync::atomic::Ordering::Relaxed); } } From 3bc7b00fb3e6cc449e93aa72eaf40d566f4d66fb Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 9 Dec 2024 16:03:50 +0100 Subject: [PATCH 30/70] ci: move deny to lint (#13230) --- .github/workflows/deny.yml | 27 --------------------------- .github/workflows/lint.yml | 4 ++++ 2 files changed, 4 insertions(+), 27 deletions(-) delete mode 100644 .github/workflows/deny.yml diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml deleted file mode 100644 index f85484ca2ec2..000000000000 --- a/.github/workflows/deny.yml +++ /dev/null @@ -1,27 +0,0 @@ -# Runs `cargo-deny` when modifying `Cargo.lock`. 
- -name: deny - -on: - push: - branches: [main] - paths: [Cargo.lock] - pull_request: - branches: [main] - paths: [Cargo.lock] - merge_group: - -env: - CARGO_TERM_COLOR: always - -concurrency: deny-${{ github.head_ref || github.run_id }} - -jobs: - deny: - name: deny - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: EmbarkStudios/cargo-deny-action@v2 - with: - command: check all diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 61ba54e95568..418fd4cc4e68 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -251,6 +251,9 @@ jobs: zepter --version time zepter run check + deny: + uses: ithacaxyz/ci/.github/workflows/deny.yml@main + lint-success: name: lint success runs-on: ubuntu-latest @@ -269,6 +272,7 @@ jobs: - no-test-deps - features - feature-propagation + - deny timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed From da5ffc24c229f8b0b459213b536ad8e7396c7348 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 9 Dec 2024 15:12:36 +0000 Subject: [PATCH 31/70] fix(trie): reveal extension child in sparse trie when updating a leaf (#13183) --- crates/trie/sparse/src/state.rs | 48 +++---- crates/trie/sparse/src/trie.rs | 216 +++++++++++++++++--------------- 2 files changed, 141 insertions(+), 123 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index ec51df8982c3..6638632f0adc 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -273,30 +273,6 @@ impl SparseStateTrie { Ok(Some(root_node)) } - /// Update the account leaf node. - pub fn update_account_leaf( - &mut self, - path: Nibbles, - value: Vec, - ) -> SparseStateTrieResult<()> { - self.state.update_leaf(path, value)?; - Ok(()) - } - - /// Update the leaf node of a storage trie at the provided address. 
- pub fn update_storage_leaf( - &mut self, - address: B256, - slot: Nibbles, - value: Vec, - ) -> SparseStateTrieResult<()> { - if let Some(storage_trie) = self.storages.get_mut(&address) { - Ok(storage_trie.update_leaf(slot, value)?) - } else { - Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) - } - } - /// Wipe the storage trie at the provided address. pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { if let Some(trie) = self.storages.get_mut(&address) { @@ -354,6 +330,30 @@ where SparseTrieError: From<::Error> + From<::Error>, { + /// Update the account leaf node. + pub fn update_account_leaf( + &mut self, + path: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + self.state.update_leaf(path, value)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn update_storage_leaf( + &mut self, + address: B256, + slot: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.update_leaf(slot, value)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } + } + /// Update or remove trie account based on new account info. This method will either recompute /// the storage root based on update storage trie or look it up from existing leaf value. /// diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8fff0819bcba..e4d4ff701f3e 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -104,13 +104,6 @@ impl

SparseTrie

{ Ok(self.as_revealed_mut().unwrap()) } - /// Update the leaf node. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - revealed.update_leaf(path, value)?; - Ok(()) - } - /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; @@ -134,6 +127,13 @@ where P: BlindedProvider, SparseTrieError: From, { + /// Update the leaf node. + pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.update_leaf(path, value)?; + Ok(()) + } + /// Remove the leaf node. pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; @@ -372,98 +372,6 @@ impl

RevealedSparseTrie

{ self.reveal_node(path, TrieNode::decode(&mut &child[..])?, None) } - /// Update the leaf node with provided value. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { - self.prefix_set.insert(path.clone()); - let existing = self.values.insert(path.clone(), value); - if existing.is_some() { - // trie structure unchanged, return immediately - return Ok(()) - } - - let mut current = Nibbles::default(); - while let Some(node) = self.nodes.get_mut(¤t) { - match node { - SparseNode::Empty => { - *node = SparseNode::new_leaf(path); - break - } - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) - } - SparseNode::Leaf { key: current_key, .. } => { - current.extend_from_slice_unchecked(current_key); - - // this leaf is being updated - if current == path { - unreachable!("we already checked leaf presence in the beginning"); - } - - // find the common prefix - let common = current.common_prefix_length(&path); - - // update existing node - let new_ext_key = current.slice(current.len() - current_key.len()..common); - *node = SparseNode::new_ext(new_ext_key); - - // create a branch node and corresponding leaves - self.nodes.insert( - current.slice(..common), - SparseNode::new_split_branch(current[common], path[common]), - ); - self.nodes.insert( - path.slice(..=common), - SparseNode::new_leaf(path.slice(common + 1..)), - ); - self.nodes.insert( - current.slice(..=common), - SparseNode::new_leaf(current.slice(common + 1..)), - ); - - break; - } - SparseNode::Extension { key, .. 
} => { - current.extend_from_slice(key); - if !path.starts_with(¤t) { - // find the common prefix - let common = current.common_prefix_length(&path); - - *key = current.slice(current.len() - key.len()..common); - - // create state mask for new branch node - // NOTE: this might overwrite the current extension node - let branch = SparseNode::new_split_branch(current[common], path[common]); - self.nodes.insert(current.slice(..common), branch); - - // create new leaf - let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); - self.nodes.insert(path.slice(..=common), new_leaf); - - // recreate extension to previous child if needed - let key = current.slice(common + 1..); - if !key.is_empty() { - self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); - } - - break; - } - } - SparseNode::Branch { state_mask, .. } => { - let nibble = path[current.len()]; - current.push_unchecked(nibble); - if !state_mask.is_bit_set(nibble) { - state_mask.set_bit(nibble); - let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); - self.nodes.insert(current, new_leaf); - break; - } - } - }; - } - - Ok(()) - } - /// Traverse trie nodes down to the leaf node and collect all nodes along the path. fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { let mut current = Nibbles::default(); // Start traversal from the root @@ -866,6 +774,116 @@ where P: BlindedProvider, SparseTrieError: From, { + /// Update the leaf node with provided value. 
+ pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + self.prefix_set.insert(path.clone()); + let existing = self.values.insert(path.clone(), value); + if existing.is_some() { + // trie structure unchanged, return immediately + return Ok(()) + } + + let mut current = Nibbles::default(); + while let Some(node) = self.nodes.get_mut(¤t) { + match node { + SparseNode::Empty => { + *node = SparseNode::new_leaf(path); + break + } + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + } + SparseNode::Leaf { key: current_key, .. } => { + current.extend_from_slice_unchecked(current_key); + + // this leaf is being updated + if current == path { + unreachable!("we already checked leaf presence in the beginning"); + } + + // find the common prefix + let common = current.common_prefix_length(&path); + + // update existing node + let new_ext_key = current.slice(current.len() - current_key.len()..common); + *node = SparseNode::new_ext(new_ext_key); + + // create a branch node and corresponding leaves + self.nodes.insert( + current.slice(..common), + SparseNode::new_split_branch(current[common], path[common]), + ); + self.nodes.insert( + path.slice(..=common), + SparseNode::new_leaf(path.slice(common + 1..)), + ); + self.nodes.insert( + current.slice(..=common), + SparseNode::new_leaf(current.slice(common + 1..)), + ); + + break; + } + SparseNode::Extension { key, .. } => { + current.extend_from_slice(key); + + if !path.starts_with(¤t) { + // find the common prefix + let common = current.common_prefix_length(&path); + *key = current.slice(current.len() - key.len()..common); + + // If branch node updates retention is enabled, we need to query the + // extension node child to later set the hash mask for a parent branch node + // correctly. 
+ if self.updates.is_some() { + // Check if the extension node child is a hash that needs to be revealed + if self.nodes.get(¤t).unwrap().is_hash() { + if let Some(node) = self.provider.blinded_node(current.clone())? { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!(target: "trie::sparse", ?current, ?decoded, "Revealing extension node child"); + // We'll never have to update the revealed child node, only + // remove or do nothing, so + // we can safely ignore the hash mask here and + // pass `None`. + self.reveal_node(current.clone(), decoded, None)?; + } + } + } + + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + let branch = SparseNode::new_split_branch(current[common], path[common]); + self.nodes.insert(current.slice(..common), branch); + + // create new leaf + let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); + self.nodes.insert(path.slice(..=common), new_leaf); + + // recreate extension to previous child if needed + let key = current.slice(common + 1..); + if !key.is_empty() { + self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); + } + + break; + } + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path[current.len()]; + current.push_unchecked(nibble); + if !state_mask.is_bit_set(nibble) { + state_mask.set_bit(nibble); + let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); + self.nodes.insert(current, new_leaf); + break; + } + } + }; + } + + Ok(()) + } + /// Remove leaf node from the trie. 
pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { if self.values.remove(path).is_none() { From b5bbb8d751d48be15b4f5c13c965716be9c1eeda Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:14:39 -0500 Subject: [PATCH 32/70] chore: rename HighestStaticFiles::min (#13235) --- crates/cli/commands/src/prune.rs | 3 ++- crates/cli/commands/src/stage/unwind.rs | 2 +- crates/stages/api/src/pipeline/mod.rs | 2 +- crates/static-file/types/src/lib.rs | 12 ++++++------ 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index 37f0637b0a5c..a5b186bc1380 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -24,7 +24,8 @@ impl> PruneComma info!(target: "reth::cli", "Copying data from database to static files..."); let static_file_producer = StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); - let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min(); + let lowest_static_file_height = + static_file_producer.lock().copy_to_static_files()?.min_block_num(); info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); // Delete data which has been copied to static files. 
diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index 2d29121d0698..de535d65508a 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -58,7 +58,7 @@ impl> Command let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() - .max() + .max_block_num() .filter(|highest_static_file_block| *highest_static_file_block > target); // Execute a pipeline unwind if the start of the range overlaps the existing static diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 39d26cd88082..ec57de8d11c9 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -256,7 +256,7 @@ impl Pipeline { pub fn move_to_static_files(&self) -> RethResult<()> { // Copies data from database to static files let lowest_static_file_height = - self.static_file_producer.lock().copy_to_static_files()?.min(); + self.static_file_producer.lock().copy_to_static_files()?.min_block_num(); // Deletes data which has been copied to static files. if let Some(prune_tip) = lowest_static_file_height { diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 4fc9c545e7c1..7a9980b35595 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -55,12 +55,12 @@ impl HighestStaticFiles { } /// Returns the minimum block of all segments. - pub fn min(&self) -> Option { + pub fn min_block_num(&self) -> Option { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).min() } /// Returns the maximum block of all segments. 
- pub fn max(&self) -> Option { + pub fn max_block_num(&self) -> Option { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() } } @@ -154,11 +154,11 @@ mod tests { HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; // Minimum value among the available segments - assert_eq!(files.min(), Some(100)); + assert_eq!(files.min_block_num(), Some(100)); let empty_files = HighestStaticFiles::default(); // No values, should return None - assert_eq!(empty_files.min(), None); + assert_eq!(empty_files.min_block_num(), None); } #[test] @@ -167,11 +167,11 @@ mod tests { HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; // Maximum value among the available segments - assert_eq!(files.max(), Some(500)); + assert_eq!(files.max_block_num(), Some(500)); let empty_files = HighestStaticFiles::default(); // No values, should return None - assert_eq!(empty_files.max(), None); + assert_eq!(empty_files.max_block_num(), None); } #[test] From a3e90e18b648edc20e18bcb615b46f8ec35d12fe Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 9 Dec 2024 17:33:17 +0000 Subject: [PATCH 33/70] fix(trie): retain updates only for sparse branch nodes in the prefix set (#13234) --- crates/trie/sparse/src/trie.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e4d4ff701f3e..b4adc8c60a52 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -623,6 +623,7 @@ impl

RevealedSparseTrie

{ )); continue } + let retain_updates = self.updates.is_some() && prefix_set_contains(&path); buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first @@ -650,7 +651,7 @@ impl

RevealedSparseTrie

{ buffers.rlp_node_stack.pop().unwrap(); // Update the masks only if we need to retain trie updates - if self.updates.is_some() { + if retain_updates { // Set the trie mask let tree_mask_value = if node_type.store_in_db_trie() { // A branch or an extension node explicitly set the @@ -716,7 +717,7 @@ impl

RevealedSparseTrie

{ // Save a branch node update only if it's not a root node, and we need to // persist updates. let store_in_db_trie_value = if let Some(updates) = - self.updates.as_mut().filter(|_| !path.is_empty()) + self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) { let mut tree_mask_values = tree_mask_values.into_iter().rev(); let mut hash_mask_values = hash_mask_values.into_iter().rev(); From 3af2afe99528750dc7465433c4248fd1435bf3e0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 19:26:32 +0100 Subject: [PATCH 34/70] chore: relax more consensus functions (#13236) --- crates/consensus/common/Cargo.toml | 4 +- crates/consensus/common/src/validation.rs | 70 +++++++++++++---------- crates/ethereum/consensus/src/lib.rs | 2 +- crates/optimism/consensus/src/lib.rs | 2 +- crates/primitives/src/block.rs | 62 +++++++++++--------- 5 files changed, 79 insertions(+), 61 deletions(-) diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 272adbb9297a..a9a0c69ae559 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -13,8 +13,8 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives.workspace = true reth-consensus.workspace = true +reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true @@ -24,8 +24,8 @@ alloy-consensus.workspace = true alloy-eips.workspace = true [dev-dependencies] +alloy-consensus.workspace = true reth-storage-api.workspace = true rand.workspace = true mockall = "0.13" -alloy-consensus.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 2c38fa2d6cd4..37b6138e5d46 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,14 +1,14 @@ //! Collection of methods for block validation. 
-use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ calc_next_block_base_fee, eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, }; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{BlockBody, EthereumHardfork, GotExpected, SealedBlock, SealedHeader}; -use reth_primitives_traits::BlockBody as _; +use reth_primitives::SealedBlock; +use reth_primitives_traits::{BlockBody, GotExpected, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. @@ -43,11 +43,11 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals( +pub fn validate_shanghai_withdrawals( block: &SealedBlock, ) -> Result<(), ConsensusError> { let withdrawals = block.body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + let withdrawals_root = alloy_consensus::proofs::calculate_withdrawals_root(withdrawals); let header_withdrawals_root = block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?; if withdrawals_root != *header_withdrawals_root { @@ -64,7 +64,7 @@ pub fn validate_shanghai_withdrawals( +pub fn validate_cancun_gas( block: &SealedBlock, ) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each @@ -87,28 +87,31 @@ pub fn validate_cancun_gas /// - ommer hash /// - transaction root /// - withdrawals root -pub fn validate_body_against_header( - body: &BlockBody, - header: &SealedHeader, -) -> Result<(), ConsensusError> { +pub fn validate_body_against_header(body: 
&B, header: &H) -> Result<(), ConsensusError> +where + B: BlockBody, + H: BlockHeader, +{ let ommers_hash = body.calculate_ommers_root(); - if header.ommers_hash != ommers_hash { + if Some(header.ommers_hash()) != ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: header.ommers_hash(), + } + .into(), )) } let tx_root = body.calculate_tx_root(); - if header.transactions_root != tx_root { + if header.transactions_root() != tx_root { return Err(ConsensusError::BodyTransactionRootDiff( - GotExpected { got: tx_root, expected: header.transactions_root }.into(), + GotExpected { got: tx_root, expected: header.transactions_root() }.into(), )) } - match (header.withdrawals_root, &body.withdrawals) { - (Some(header_withdrawals_root), Some(withdrawals)) => { - let withdrawals = withdrawals.as_slice(); - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + match (header.withdrawals_root(), body.calculate_withdrawals_root()) { + (Some(header_withdrawals_root), Some(withdrawals_root)) => { if withdrawals_root != header_withdrawals_root { return Err(ConsensusError::BodyWithdrawalsRootDiff( GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), @@ -130,15 +133,24 @@ pub fn validate_body_against_header( /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation /// - (Optionally) Compares the receipts root in the block header to the block body -pub fn validate_block_pre_execution( - block: &SealedBlock, +pub fn validate_block_pre_execution( + block: &SealedBlock, chain_spec: &ChainSpec, -) -> Result<(), ConsensusError> { +) -> Result<(), ConsensusError> +where + H: BlockHeader, + B: BlockBody, + ChainSpec: EthereumHardforks, +{ // Check ommers hash let ommers_hash = block.body.calculate_ommers_root(); 
- if block.header.ommers_hash != ommers_hash { + if Some(block.header.ommers_hash()) != ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: block.header.ommers_hash(), + } + .into(), )) } @@ -148,11 +160,11 @@ pub fn validate_block_pre_execution( } // EIP-4895: Beacon chain push withdrawals as operations - if chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + if chain_spec.is_shanghai_active_at_timestamp(block.timestamp()) { validate_shanghai_withdrawals(block)?; } - if chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + if chain_spec.is_cancun_active_at_timestamp(block.timestamp()) { validate_cancun_gas(block)?; } @@ -222,12 +234,12 @@ pub fn validate_header_extradata(header: &H) -> Result<(), Conse #[inline] pub fn validate_against_parent_hash_number( header: &H, - parent: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Parent number is consistent. 
- if parent.number + 1 != header.number() { + if parent.number() + 1 != header.number() { return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: parent.number, + parent_block_number: parent.number(), block_number: header.number(), }) } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ba737e56728c..4d3ba6282694 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -121,7 +121,7 @@ impl Consensu body: &BlockBody, header: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) + validate_body_against_header(body, header.header()) } fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 6d457f42c901..d05ff9c9bd76 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -65,7 +65,7 @@ impl Consensus for OpBeaconConsensus { body: &BlockBody, header: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_body_against_header(body, header) + validate_body_against_header(body, header.header()) } fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 799e3e7a4c92..b02456f6c486 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -296,6 +296,40 @@ impl SealedBlock { } } +impl SealedBlock +where + H: alloy_consensus::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Ensures that the transaction root in the block header is valid. + /// + /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure + /// populated with each transaction in the transactions list portion of the block. 
+ /// + /// # Returns + /// + /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, + /// indicating that the transactions in the block are correctly represented in the trie. + /// + /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` + /// error containing the calculated and expected roots. + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> + where + B::Transaction: Encodable2718, + { + let calculated_root = self.body.calculate_tx_root(); + + if self.header.transactions_root() != calculated_root { + return Err(GotExpected { + got: calculated_root, + expected: self.header.transactions_root(), + }) + } + + Ok(()) + } +} + impl SealedBlock where H: reth_primitives_traits::BlockHeader, @@ -385,34 +419,6 @@ where Block::new(self.header.unseal(), self.body) } - /// Ensures that the transaction root in the block header is valid. - /// - /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure - /// populated with each transaction in the transactions list portion of the block. - /// - /// # Returns - /// - /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, - /// indicating that the transactions in the block are correctly represented in the trie. - /// - /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` - /// error containing the calculated and expected roots. - pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> - where - B::Transaction: Encodable2718, - { - let calculated_root = self.body.calculate_tx_root(); - - if self.header.transactions_root() != calculated_root { - return Err(GotExpected { - got: calculated_root, - expected: self.header.transactions_root(), - }) - } - - Ok(()) - } - /// Returns a vector of encoded 2718 transactions. /// /// This is also known as `raw transactions`. 
From c7c84f2d3fc59cbec2288f6a097dc029c9fb0375 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 9 Dec 2024 23:08:49 +0400 Subject: [PATCH 35/70] feat: relax bounds for `eth_simulateV1` (#13232) --- Cargo.lock | 1 - crates/optimism/rpc/src/eth/call.rs | 6 +- crates/optimism/rpc/src/eth/pending_block.rs | 26 ++-- crates/optimism/rpc/src/eth/transaction.rs | 17 ++- crates/primitives-traits/src/block/mod.rs | 3 + crates/rpc/rpc-eth-api/Cargo.toml | 1 - crates/rpc/rpc-eth-api/src/core.rs | 8 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 44 +++--- crates/rpc/rpc-eth-api/src/helpers/mod.rs | 7 +- .../rpc-eth-api/src/helpers/pending_block.rs | 59 +++++--- .../rpc-eth-api/src/helpers/transaction.rs | 6 +- crates/rpc/rpc-eth-types/src/simulate.rs | 143 +++++------------- .../rpc/rpc-types-compat/src/transaction.rs | 4 + crates/rpc/rpc/src/eth/helpers/call.rs | 7 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 25 +-- crates/rpc/rpc/src/eth/helpers/types.rs | 15 ++ 16 files changed, 172 insertions(+), 200 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb49d13e8724..58982032e30f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9129,7 +9129,6 @@ dependencies = [ "reth-chainspec", "reth-errors", "reth-evm", - "reth-execution-types", "reth-network-api", "reth-node-api", "reth-primitives", diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index f7691756408c..959d765e3491 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -5,15 +5,15 @@ use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; use reth_provider::ProviderHeader; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, + helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, + FromEthApiError, FullEthApiTypes, IntoEthApiError, }; use 
reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; use revm::primitives::{BlockEnv, OptimismFields, TxEnv}; impl EthCall for OpEthApi where - Self: EstimateCall + LoadPendingBlock, + Self: EstimateCall + LoadBlock + FullEthApiTypes, N: OpNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5c437de76a7a..01c2264063e7 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -21,7 +21,7 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, SpecId}; +use revm::primitives::{BlockEnv, ExecutionResult}; impl LoadPendingBlock for OpEthApi where @@ -82,23 +82,26 @@ where fn assemble_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + block_env: &BlockEnv, parent_hash: B256, state_root: B256, transactions: Vec>, receipts: &[ProviderReceipt], ) -> reth_provider::ProviderBlock { let chain_spec = self.provider().chain_spec(); + let timestamp = block_env.timestamp.to::(); let transactions_root = calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root_no_memo_optimism( &receipts.iter().collect::>(), &chain_spec, - block_env.timestamp.to::(), + timestamp, ); let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); let header = Header { parent_hash, @@ -107,10 +110,9 @@ where state_root, transactions_root, receipts_root, - withdrawals_root: (cfg.handler_cfg.spec_id >= SpecId::SHANGHAI) - .then_some(EMPTY_WITHDRAWALS), + withdrawals_root: (is_shanghai).then_some(EMPTY_WITHDRAWALS), logs_bloom, - timestamp: 
block_env.timestamp.to::(), + timestamp, mix_hash: block_env.prevrandao.unwrap_or_default(), nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(block_env.basefee.to::()), @@ -118,15 +120,13 @@ where gas_limit: block_env.gas_limit.to::(), difficulty: U256::ZERO, gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), - blob_gas_used: (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then(|| { + blob_gas_used: is_cancun.then(|| { transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() }), excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), - parent_beacon_block_root: (cfg.handler_cfg.spec_id >= SpecId::CANCUN) - .then_some(B256::ZERO), - requests_hash: (cfg.handler_cfg.spec_id >= SpecId::PRAGUE) - .then_some(EMPTY_REQUESTS_HASH), + parent_beacon_block_root: is_cancun.then_some(B256::ZERO), + requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH), target_blobs_per_block: None, }; @@ -139,7 +139,7 @@ where fn assemble_receipt( &self, - tx: &reth_primitives::RecoveredTx>, + tx: &ProviderTx, result: ExecutionResult, cumulative_gas_used: u64, ) -> reth_provider::ProviderReceipt { diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index d455d8e897ee..468b46d97eba 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,7 +1,7 @@ //! Loads and formats OP transaction RPC response. 
use alloy_consensus::{Signed, Transaction as _}; -use alloy_primitives::{Bytes, Sealable, Sealed, B256}; +use alloy_primitives::{Bytes, PrimitiveSignature as Signature, Sealable, Sealed, B256}; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; @@ -14,7 +14,7 @@ use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, }; -use reth_rpc_eth_types::utils::recover_raw_transaction; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; @@ -151,6 +151,19 @@ where }) } + fn build_simulate_v1_transaction( + &self, + request: alloy_rpc_types_eth::TransactionRequest, + ) -> Result { + let Ok(tx) = request.build_typed_tx() else { + return Err(OpEthApiError::Eth(EthApiError::TransactionConversionError)) + }; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + Ok(TransactionSigned::new_unhashed(tx.into(), signature)) + } + fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { let input = match &mut tx.inner.inner { OpTxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 53afc7377687..1994075b9227 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -24,6 +24,9 @@ impl FullBlock for T where { } +/// Helper trait to access [`BlockBody::Transaction`] given a [`Block`]. +pub type BlockTx = <::Body as BlockBody>::Transaction; + /// Abstraction of block data type. 
// todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 95ed98d80869..6f65b91d8f80 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -26,7 +26,6 @@ reth-rpc-types-compat.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-chainspec.workspace = true -reth-execution-types.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-network-api.workspace = true diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 810400c6f6e0..c103835a8010 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -13,7 +13,6 @@ use alloy_rpc_types_eth::{ }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_provider::BlockReader; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -372,12 +371,7 @@ impl RpcHeader, > for T where - T: FullEthApi< - Provider: BlockReader< - Header = alloy_consensus::Header, - Transaction = reth_primitives::TransactionSigned, - >, - >, + T: FullEthApi, jsonrpsee_types::error::ErrorObject<'static>: From, { /// Handler for: `eth_protocolVersion` diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index f6d665121fc4..e22fccc67261 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -20,9 +20,7 @@ use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_node_api::BlockBody; use reth_primitives_traits::SignedTransaction; -use reth_provider::{ - BlockIdReader, BlockReader, ChainSpecProvider, HeaderProvider, ProviderHeader, -}; +use reth_provider::{BlockIdReader, 
ChainSpecProvider, HeaderProvider, ProviderHeader}; use reth_revm::{ database::StateProviderDatabase, db::CacheDB, @@ -50,7 +48,7 @@ pub type SimulatedBlocksResult = Result>>, /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. -pub trait EthCall: EstimateCall + Call + LoadPendingBlock { +pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthApiTypes { /// Estimate gas needed for execution of the `request` at the [`BlockId`]. fn estimate_gas_at( &self, @@ -70,15 +68,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { &self, payload: SimulatePayload, block: Option, - ) -> impl Future> + Send - where - Self: LoadBlock< - Provider: BlockReader< - Header = alloy_consensus::Header, - Transaction = reth_primitives::TransactionSigned, - >, - > + FullEthApiTypes, - { + ) -> impl Future> + Send { async move { if payload.block_state_calls.len() > self.max_simulate_blocks() as usize { return Err(EthApiError::InvalidParams("too many blocks.".to_string()).into()) @@ -171,9 +161,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { block_env.gas_limit.to(), cfg.chain_id, &mut db, + this.tx_resp_builder(), )?; let mut calls = calls.into_iter().peekable(); + let mut senders = Vec::with_capacity(transactions.len()); let mut results = Vec::with_capacity(calls.len()); while let Some(tx) = calls.next() { @@ -197,18 +189,27 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { db.commit(res.state); } - results.push((env.tx.caller, res.result)); + senders.push(env.tx.caller); + results.push(res.result); } + let (block, _) = this.assemble_block_and_receipts( + &block_env, + parent_hash, + // state root calculation is skipped for performance reasons + B256::ZERO, + transactions, + results.clone(), + ); + let block: SimulatedBlock> = - simulate::build_block( + simulate::build_simulated_block( + senders, results, - transactions, - &block_env, - parent_hash, 
total_difficulty, return_full_transactions, this.tx_resp_builder(), + block, )?; parent_hash = block.inner.header.hash; @@ -245,10 +246,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock { bundle: Bundle, state_context: Option, mut state_override: Option, - ) -> impl Future, Self::Error>> + Send - where - Self: LoadBlock, - { + ) -> impl Future, Self::Error>> + Send { async move { let Bundle { transactions, block_override } = bundle; if transactions.is_empty() { @@ -608,7 +606,7 @@ pub trait Call: f: F, ) -> impl Future, Self::Error>> + Send where - Self: LoadBlock + LoadPendingBlock + LoadTransaction, + Self: LoadBlock + LoadTransaction, F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> Result + Send + 'static, diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 174cb3bad046..27d23da74b2e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -42,12 +42,9 @@ pub use transaction::{EthTransactions, LoadTransaction}; use crate::FullEthApiTypes; /// Extension trait that bundles traits needed for tracing transactions. -pub trait TraceExt: - LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call -{ -} +pub trait TraceExt: LoadTransaction + LoadBlock + SpawnBlocking + Trace + Call {} -impl TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + Trace + Call {} +impl TraceExt for T where T: LoadTransaction + LoadBlock + Trace + Call {} /// Helper trait to unify all `eth` rpc server building block traits, for simplicity. 
/// diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 2ca6c028c310..c6e0e0c5939c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -15,8 +15,7 @@ use reth_evm::{ state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes, }; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockExt, InvalidTransactionError, RecoveredTx, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, InvalidTransactionError, SealedBlockWithSenders}; use reth_primitives_traits::receipt::ReceiptExt; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, ProviderError, @@ -199,7 +198,7 @@ pub trait LoadPendingBlock: /// Assembles a receipt for a transaction, based on its [`ExecutionResult`]. fn assemble_receipt( &self, - tx: &RecoveredTx>, + tx: &ProviderTx, result: ExecutionResult, cumulative_gas_used: u64, ) -> ProviderReceipt; @@ -207,14 +206,36 @@ pub trait LoadPendingBlock: /// Assembles a pending block. fn assemble_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + block_env: &BlockEnv, parent_hash: revm_primitives::B256, state_root: revm_primitives::B256, transactions: Vec>, receipts: &[ProviderReceipt], ) -> ProviderBlock; + /// Helper to invoke both [`Self::assemble_block`] and [`Self::assemble_receipt`]. 
+ fn assemble_block_and_receipts( + &self, + block_env: &BlockEnv, + parent_hash: revm_primitives::B256, + state_root: revm_primitives::B256, + transactions: Vec>, + results: Vec, + ) -> (ProviderBlock, Vec>) { + let mut cumulative_gas_used = 0; + let mut receipts = Vec::with_capacity(results.len()); + + for (tx, outcome) in transactions.iter().zip(results) { + cumulative_gas_used += outcome.gas_used(); + receipts.push(self.assemble_receipt(tx, outcome, cumulative_gas_used)); + } + + let block = + self.assemble_block(block_env, parent_hash, state_root, transactions, &receipts); + + (block, receipts) + } + /// Builds a pending block using the configured provider and pool. /// /// If the origin is the actual pending block, the block is built with withdrawals. @@ -248,7 +269,6 @@ pub trait LoadPendingBlock: let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = block_env.gas_limit.to::(); let base_fee = block_env.basefee.to::(); - let block_number = block_env.number.to::(); let mut executed_txs = Vec::new(); let mut senders = Vec::new(); @@ -266,7 +286,7 @@ pub trait LoadPendingBlock: .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash) .map_err(|err| EthApiError::Internal(err.into()))?; - let mut receipts = Vec::new(); + let mut results = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction @@ -374,13 +394,11 @@ pub trait LoadPendingBlock: // add gas used by the transaction to cumulative gas used, before creating the receipt cumulative_gas_used += gas_used; - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(self.assemble_receipt(&tx, result, cumulative_gas_used))); - // append transaction to the list of executed transactions let (tx, sender) = tx.to_components(); executed_txs.push(tx); senders.push(sender); + results.push(result); } // executes the withdrawals and commits them to the Database and BundleState. 
@@ -396,22 +414,19 @@ pub trait LoadPendingBlock: // merge all transitions into bundle state. db.merge_transitions(BundleRetention::PlainState); - let execution_outcome: ExecutionOutcome> = - ExecutionOutcome::new( - db.take_bundle(), - vec![receipts.clone()].into(), - block_number, - Vec::new(), - ); - let hashed_state = db.database.hashed_post_state(execution_outcome.state()); + let bundle_state = db.take_bundle(); + let hashed_state = db.database.hashed_post_state(&bundle_state); // calculate the state root let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; - // Convert Vec> to Vec - let receipts: Vec<_> = receipts.into_iter().flatten().collect(); - let block = - self.assemble_block(cfg, block_env, parent_hash, state_root, executed_txs, &receipts); + let (block, receipts) = self.assemble_block_and_receipts( + &block_env, + parent_hash, + state_root, + executed_txs, + results, + ); Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts)) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 364ea27cc31d..253aac91d8b9 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -25,9 +25,7 @@ use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_blo use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; -use super::{ - EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, -}; +use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking}; use crate::{ helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, @@ -365,7 +363,7 @@ pub trait EthTransactions: LoadTransaction { mut request: TransactionRequest, ) -> impl Future> + Send where - Self: EthApiSpec + LoadBlock + 
LoadPendingBlock + EstimateCall, + Self: EthApiSpec + LoadBlock + EstimateCall, { async move { let from = match request.from { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index e5ccb47ba5c3..a6ea5c4b7881 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,21 +1,18 @@ //! Utilities for serving `eth_simulateV1` -use alloy_consensus::{Transaction as _, TxType}; -use alloy_primitives::PrimitiveSignature as Signature; +use alloy_consensus::{BlockHeader, Transaction as _, TxType}; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, transaction::TransactionRequest, - Block, BlockTransactionsKind, + Block, BlockTransactionsKind, Header, }; use jsonrpsee_types::ErrorObject; -use reth_primitives::{ - proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, TransactionSigned, -}; +use reth_primitives::BlockWithSenders; +use reth_primitives_traits::{block::BlockTx, BlockBody as _, SignedTransaction}; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; use revm::Database; -use revm_primitives::{Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm_primitives::{Address, Bytes, ExecutionResult, TxKind, U256}; use crate::{ error::{api::FromEthApiError, ToRpcError}, @@ -49,17 +46,18 @@ impl ToRpcError for EthSimulateError { } /// Goes over the list of [`TransactionRequest`]s and populates missing fields trying to resolve -/// them into [`TransactionSigned`]. +/// them into primitive transactions. /// /// If validation is enabled, the function will return error if any of the transactions can't be /// built right away. 
-pub fn resolve_transactions( +pub fn resolve_transactions>( txs: &mut [TransactionRequest], validation: bool, block_gas_limit: u64, chain_id: u64, db: &mut DB, -) -> Result, EthApiError> + tx_resp_builder: &T, +) -> Result, EthApiError> where EthApiError: From, { @@ -125,49 +123,44 @@ where } } - let Ok(tx) = tx.clone().build_typed_tx() else { - return Err(EthApiError::TransactionConversionError) - }; - - // Create an empty signature for the transaction. - let signature = Signature::new(Default::default(), Default::default(), false); - let tx = TransactionSigned::new_unhashed(tx.into(), signature); - transactions.push(tx); + transactions.push( + tx_resp_builder + .build_simulate_v1_transaction(tx.clone()) + .map_err(|e| EthApiError::other(e.into()))?, + ); } Ok(transactions) } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. -pub fn build_block>( - results: Vec<(Address, ExecutionResult)>, - transactions: Vec, - block_env: &BlockEnv, - parent_hash: B256, +#[expect(clippy::type_complexity)] +pub fn build_simulated_block( + senders: Vec
<Address>
, + results: Vec, total_difficulty: U256, full_transactions: bool, tx_resp_builder: &T, -) -> Result>, T::Error> { + block: B, +) -> Result>>, T::Error> +where + T: TransactionCompat, Error: FromEthApiError>, + B: reth_primitives_traits::Block, +{ let mut calls: Vec = Vec::with_capacity(results.len()); - let mut senders = Vec::with_capacity(results.len()); - let mut receipts = Vec::with_capacity(results.len()); let mut log_index = 0; - for (transaction_index, ((sender, result), tx)) in - results.into_iter().zip(transactions.iter()).enumerate() - { - senders.push(sender); - + for (index, (result, tx)) in results.iter().zip(block.body().transactions()).enumerate() { let call = match result { ExecutionResult::Halt { reason, gas_used } => { - let error = RpcInvalidTransactionError::halt(reason, tx.gas_limit()); + let error = RpcInvalidTransactionError::halt(*reason, tx.gas_limit()); SimCallResult { return_data: Bytes::new(), error: Some(SimulateError { code: error.error_code(), message: error.to_string(), }), - gas_used, + gas_used: *gas_used, logs: Vec::new(), status: false, } @@ -175,31 +168,31 @@ pub fn build_block>( ExecutionResult::Revert { output, gas_used } => { let error = RevertError::new(output.clone()); SimCallResult { - return_data: output, + return_data: output.clone(), error: Some(SimulateError { code: error.error_code(), message: error.to_string(), }), - gas_used, + gas_used: *gas_used, status: false, logs: Vec::new(), } } ExecutionResult::Success { output, gas_used, logs, .. 
} => SimCallResult { - return_data: output.into_data(), + return_data: output.clone().into_data(), error: None, - gas_used, + gas_used: *gas_used, logs: logs - .into_iter() + .iter() .map(|log| { log_index += 1; alloy_rpc_types_eth::Log { - inner: log, + inner: log.clone(), log_index: Some(log_index - 1), - transaction_index: Some(transaction_index as u64), - transaction_hash: Some(tx.hash()), - block_number: Some(block_env.number.to()), - block_timestamp: Some(block_env.timestamp.to()), + transaction_index: Some(index as u64), + transaction_hash: Some(*tx.tx_hash()), + block_number: Some(block.header().number()), + block_timestamp: Some(block.header().timestamp()), ..Default::default() } }) @@ -208,70 +201,10 @@ pub fn build_block>( }, }; - receipts.push( - #[allow(clippy::needless_update)] - Receipt { - tx_type: tx.tx_type(), - success: call.status, - cumulative_gas_used: call.gas_used + calls.iter().map(|c| c.gas_used).sum::(), - logs: call.logs.iter().map(|log| &log.inner).cloned().collect(), - ..Default::default() - } - .with_bloom(), - ); - calls.push(call); } - // TODO: uncomment once performance cost is acceptable - // - // let mut hashed_state = HashedPostState::default(); - // for (address, account) in &db.accounts { - // let hashed_address = keccak256(address); - // hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); - - // let storage = hashed_state - // .storages - // .entry(hashed_address) - // .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); - - // for (slot, value) in &account.storage { - // let slot = B256::from(*slot); - // let hashed_slot = keccak256(slot); - // storage.storage.insert(hashed_slot, *value); - // } - // } - - // let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; - let state_root = B256::ZERO; - - let header = alloy_consensus::Header { - beneficiary: block_env.coinbase, - difficulty: block_env.difficulty, - number: block_env.number.to(), - 
timestamp: block_env.timestamp.to(), - base_fee_per_gas: Some(block_env.basefee.to()), - gas_limit: block_env.gas_limit.to(), - gas_used: calls.iter().map(|c| c.gas_used).sum(), - blob_gas_used: Some(0), - parent_hash, - receipts_root: calculate_receipt_root(&receipts), - transactions_root: calculate_transaction_root(&transactions), - state_root, - logs_bloom: alloy_primitives::logs_bloom( - receipts.iter().flat_map(|r| r.receipt.logs.iter()), - ), - mix_hash: block_env.prevrandao.unwrap_or_default(), - ..Default::default() - }; - - let block = BlockWithSenders { - block: reth_primitives::Block { - header, - body: BlockBody { transactions, ..Default::default() }, - }, - senders, - }; + let block = BlockWithSenders { block, senders }; let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs index d6180ca1ee20..d3d1a71decc3 100644 --- a/crates/rpc/rpc-types-compat/src/transaction.rs +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -57,6 +57,10 @@ pub trait TransactionCompat: tx_inf: TransactionInfo, ) -> Result; + /// Builds a fake transaction from a transaction request for inclusion into block built in + /// `eth_simulateV1`. + fn build_simulate_v1_transaction(&self, request: TransactionRequest) -> Result; + /// Truncates the input of a transaction to only the first 4 bytes. // todo: remove in favour of using constructor on `TransactionResponse` or similar // . 
diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index bddd2b1b8fcb..2620165b9079 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -4,13 +4,14 @@ use crate::EthApi; use alloy_consensus::Header; use reth_evm::ConfigureEvm; use reth_provider::{BlockReader, ProviderHeader}; -use reth_rpc_eth_api::helpers::{ - estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking, +use reth_rpc_eth_api::{ + helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + FullEthApiTypes, }; impl EthCall for EthApi where - Self: EstimateCall + LoadPendingBlock, + Self: EstimateCall + LoadPendingBlock + FullEthApiTypes, Provider: BlockReader, { } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 344f56da8499..2af82ef511b8 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -20,7 +20,7 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::PendingBlock; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId, B256}; +use revm_primitives::{BlockEnv, B256}; use crate::EthApi; @@ -56,18 +56,24 @@ where fn assemble_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + block_env: &BlockEnv, parent_hash: revm_primitives::B256, state_root: revm_primitives::B256, transactions: Vec>, receipts: &[ProviderReceipt], ) -> reth_provider::ProviderBlock { + let chain_spec = self.provider().chain_spec(); + let transactions_root = calculate_transaction_root(&transactions); let receipts_root = calculate_receipt_root_no_memo(&receipts.iter().collect::>()); let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + let timestamp = block_env.timestamp.to::(); + let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + 
let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + let header = Header { parent_hash, ommers_hash: EMPTY_OMMER_ROOT_HASH, @@ -75,8 +81,7 @@ where state_root, transactions_root, receipts_root, - withdrawals_root: (cfg.handler_cfg.spec_id >= SpecId::SHANGHAI) - .then_some(EMPTY_WITHDRAWALS), + withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS), logs_bloom, timestamp: block_env.timestamp.to::(), mix_hash: block_env.prevrandao.unwrap_or_default(), @@ -86,15 +91,13 @@ where gas_limit: block_env.gas_limit.to::(), difficulty: U256::ZERO, gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), - blob_gas_used: (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then(|| { + blob_gas_used: is_cancun.then(|| { transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() }), excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), extra_data: Default::default(), - parent_beacon_block_root: (cfg.handler_cfg.spec_id >= SpecId::CANCUN) - .then_some(B256::ZERO), - requests_hash: (cfg.handler_cfg.spec_id >= SpecId::PRAGUE) - .then_some(EMPTY_REQUESTS_HASH), + parent_beacon_block_root: is_cancun.then_some(B256::ZERO), + requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH), target_blobs_per_block: None, }; @@ -107,7 +110,7 @@ where fn assemble_receipt( &self, - tx: &reth_primitives::RecoveredTx>, + tx: &ProviderTx, result: revm_primitives::ExecutionResult, cumulative_gas_used: u64, ) -> reth_provider::ProviderReceipt { diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 79fb6fcc907f..28c66967e2f5 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -2,6 +2,8 @@ use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; use alloy_network::{Ethereum, Network}; +use alloy_primitives::PrimitiveSignature as Signature; +use 
alloy_rpc_types::TransactionRequest; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{RecoveredTx, TransactionSigned}; use reth_rpc_eth_api::EthApiTypes; @@ -84,6 +86,19 @@ where }) } + fn build_simulate_v1_transaction( + &self, + request: TransactionRequest, + ) -> Result { + let Ok(tx) = request.build_typed_tx() else { + return Err(EthApiError::TransactionConversionError) + }; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + Ok(TransactionSigned::new_unhashed(tx.into(), signature)) + } + fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { let input = match &mut tx.inner { TxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, From f4ae4399da0a6dfd8127e5ba1cae75413bc2ec5d Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Mon, 9 Dec 2024 19:41:00 +0000 Subject: [PATCH 36/70] perf(trie): use entry API in sparse trie (#13240) --- crates/trie/sparse/src/trie.rs | 122 +++++++++++++++++++++------------ 1 file changed, 78 insertions(+), 44 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index b4adc8c60a52..3cc0e8703c4b 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,7 +1,7 @@ use crate::blinded::{BlindedProvider, DefaultBlindedProvider}; use alloy_primitives::{ hex, keccak256, - map::{HashMap, HashSet}, + map::{Entry, HashMap, HashSet}, B256, }; use alloy_rlp::Decodable; @@ -302,50 +302,80 @@ impl
<P> RevealedSparseTrie<P>
{ } } - match self.nodes.get(&path) { - // Blinded and non-existent nodes can be replaced. - Some(SparseNode::Hash(_)) | None => { - self.nodes.insert(path, SparseNode::new_branch(branch.state_mask)); - } - // Branch node already exists, or an extension node was placed where a - // branch node was before. - Some(SparseNode::Branch { .. } | SparseNode::Extension { .. }) => {} - // All other node types can't be handled. - Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { - return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) + match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Blinded nodes can be replaced. + SparseNode::Hash(_) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); } } } - TrieNode::Extension(ext) => match self.nodes.get(&path) { - Some(SparseNode::Hash(_)) | None => { - let mut child_path = path.clone(); + TrieNode::Extension(ext) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + SparseNode::Hash(_) => { + let mut child_path = entry.key().clone(); + child_path.extend_from_slice_unchecked(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); + self.reveal_node_or_hash(child_path, &ext.child)?; + } + // Extension node already exists, or an extension node was placed where a branch + // node was before. + SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. 
}) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + let mut child_path = entry.key().clone(); child_path.extend_from_slice_unchecked(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); self.reveal_node_or_hash(child_path, &ext.child)?; - self.nodes.insert(path, SparseNode::new_ext(ext.key)); - } - // Extension node already exists, or an extension node was placed where a branch - // node was before. - Some(SparseNode::Extension { .. } | SparseNode::Branch { .. }) => {} - // All other node types can't be handled. - Some(node @ (SparseNode::Empty | SparseNode::Leaf { .. })) => { - return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) } }, - TrieNode::Leaf(leaf) => match self.nodes.get(&path) { - Some(SparseNode::Hash(_)) | None => { - let mut full = path.clone(); + TrieNode::Leaf(leaf) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + SparseNode::Hash(_) => { + let mut full = entry.key().clone(); + full.extend_from_slice_unchecked(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); + self.values.insert(full, leaf.value); + } + // Left node already exists. + SparseNode::Leaf { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + let mut full = entry.key().clone(); full.extend_from_slice_unchecked(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); self.values.insert(full, leaf.value); - self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); } - // Left node already exists. - Some(SparseNode::Leaf { .. }) => {} - // All other node types can't be handled. - Some( - node @ (SparseNode::Empty | - SparseNode::Extension { .. } | - SparseNode::Branch { .. 
}), - ) => return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }), }, } @@ -355,16 +385,20 @@ impl
<P> RevealedSparseTrie<P>
{ fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { if child.len() == B256::len_bytes() + 1 { let hash = B256::from_slice(&child[1..]); - match self.nodes.get(&path) { - // Hash node with a different hash can't be handled. - Some(node @ SparseNode::Hash(previous_hash)) if previous_hash != &hash => { - return Err(SparseTrieError::Reveal { path, node: Box::new(node.clone()) }) - } - None => { - self.nodes.insert(path, SparseNode::Hash(hash)); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. + SparseNode::Hash(previous_hash) if previous_hash != &hash => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(SparseNode::Hash(hash)), + }) + } + _ => {} + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::Hash(hash)); } - // All other node types mean that it has already been revealed. - Some(_) => {} } return Ok(()) } From 3c132958d1932fd3038ee27fd66cfe39d75a078d Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 9 Dec 2024 20:52:58 +0100 Subject: [PATCH 37/70] perf(engine): add StateRootTask bench (#13212) --- crates/engine/tree/Cargo.toml | 4 + crates/engine/tree/benches/state_root_task.rs | 166 ++++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 2 +- crates/engine/tree/src/tree/root.rs | 16 +- 4 files changed, 179 insertions(+), 9 deletions(-) create mode 100644 crates/engine/tree/benches/state_root_task.rs diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 680b6933ebe6..67cb72850ae6 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -93,6 +93,10 @@ rand.workspace = true name = "channel_perf" harness = false +[[bench]] +name = "state_root_task" +harness = false + [features] test-utils = [ "reth-blockchain-tree/test-utils", diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs new 
file mode 100644 index 000000000000..391fd333d12f --- /dev/null +++ b/crates/engine/tree/benches/state_root_task.rs @@ -0,0 +1,166 @@ +//! Benchmark for `StateRootTask` complete workflow, including sending state +//! updates using the incoming messages sender and waiting for the final result. + +#![allow(missing_docs)] + +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use reth_engine_tree::tree::root::{StateRootConfig, StateRootTask}; +use reth_evm::system_calls::OnStateHook; +use reth_primitives::{Account as RethAccount, StorageEntry}; +use reth_provider::{ + providers::ConsistentDbView, + test_utils::{create_test_provider_factory, MockNodeTypesWithDB}, + HashingWriter, ProviderFactory, +}; +use reth_testing_utils::generators::{self, Rng}; +use reth_trie::TrieInput; +use revm_primitives::{ + Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, HashMap, + B256, KECCAK_EMPTY, U256, +}; +use std::sync::Arc; + +#[derive(Debug, Clone)] +struct BenchParams { + num_accounts: usize, + updates_per_account: usize, + storage_slots_per_account: usize, +} + +fn create_bench_state_updates(params: &BenchParams) -> Vec { + let mut rng = generators::rng(); + let all_addresses: Vec
<Address>
= (0..params.num_accounts).map(|_| rng.gen()).collect(); + let mut updates = Vec::new(); + + for _ in 0..params.updates_per_account { + let num_accounts_in_update = rng.gen_range(1..=params.num_accounts); + let mut state_update = EvmState::default(); + + let selected_addresses = &all_addresses[0..num_accounts_in_update]; + + for &address in selected_addresses { + let mut storage = HashMap::default(); + for _ in 0..params.storage_slots_per_account { + let slot = U256::from(rng.gen::()); + storage.insert( + slot, + EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::())), + ); + } + + let account = RevmAccount { + info: AccountInfo { + balance: U256::from(rng.gen::()), + nonce: rng.gen::(), + code_hash: KECCAK_EMPTY, + code: Some(Default::default()), + }, + storage, + status: AccountStatus::Touched, + }; + + state_update.insert(address, account); + } + + updates.push(state_update); + } + + updates +} + +fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount { + RethAccount { + balance: revm_account.info.balance, + nonce: revm_account.info.nonce, + bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_account.info.code_hash) + }, + } +} + +fn setup_provider( + factory: &ProviderFactory, + state_updates: &[EvmState], +) -> Result<(), Box> { + let provider_rw = factory.provider_rw()?; + + for update in state_updates { + let account_updates = update + .iter() + .map(|(address, account)| (*address, Some(convert_revm_to_reth_account(account)))); + provider_rw.insert_account_for_hashing(account_updates)?; + + let storage_updates = update.iter().map(|(address, account)| { + let storage_entries = account.storage.iter().map(|(slot, value)| StorageEntry { + key: B256::from(*slot), + value: value.present_value, + }); + (*address, storage_entries) + }); + provider_rw.insert_storage_for_hashing(storage_updates)?; + } + + provider_rw.commit()?; + Ok(()) +} + +fn bench_state_root(c: &mut Criterion) { + let mut group 
= c.benchmark_group("state_root"); + + let scenarios = vec![ + BenchParams { num_accounts: 100, updates_per_account: 5, storage_slots_per_account: 10 }, + BenchParams { num_accounts: 1000, updates_per_account: 10, storage_slots_per_account: 20 }, + ]; + + for params in scenarios { + group.bench_with_input( + BenchmarkId::new( + "state_root_task", + format!( + "accounts_{}_updates_{}_slots_{}", + params.num_accounts, + params.updates_per_account, + params.storage_slots_per_account + ), + ), + ¶ms, + |b, params| { + b.iter_with_setup( + || { + let factory = create_test_provider_factory(); + let state_updates = create_bench_state_updates(params); + setup_provider(&factory, &state_updates).expect("failed to setup provider"); + + let trie_input = Arc::new(TrieInput::from_state(Default::default())); + + let config = StateRootConfig { + consistent_view: ConsistentDbView::new(factory, None), + input: trie_input, + }; + + (config, state_updates) + }, + |(config, state_updates)| { + let task = StateRootTask::new(config); + let mut hook = task.state_hook(); + let handle = task.spawn(); + + for update in state_updates { + hook.on_state(&update) + } + drop(hook); + + black_box(handle.wait_for_result().expect("task failed")); + }, + ) + }, + ); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_root); +criterion_main!(benches); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 763d5d990c5d..5fc07abf7a2f 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -76,7 +76,7 @@ pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; -mod root; +pub mod root; /// Keeps track of the state of the tree. 
/// diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index ae22b036b65f..53a881387e76 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -37,7 +37,7 @@ pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootE /// Handle to a spawned state root task. #[derive(Debug)] #[allow(dead_code)] -pub(crate) struct StateRootHandle { +pub struct StateRootHandle { /// Channel for receiving the final result. rx: mpsc::Receiver, } @@ -50,14 +50,14 @@ impl StateRootHandle { } /// Waits for the state root calculation to complete. - pub(crate) fn wait_for_result(self) -> StateRootResult { + pub fn wait_for_result(self) -> StateRootResult { self.rx.recv().expect("state root task was dropped without sending result") } } /// Common configuration for state root tasks #[derive(Debug)] -pub(crate) struct StateRootConfig { +pub struct StateRootConfig { /// View over the state in the database. pub consistent_view: ConsistentDbView, /// Latest trie input. @@ -67,7 +67,7 @@ pub(crate) struct StateRootConfig { /// Messages used internally by the state root task #[derive(Debug)] #[allow(dead_code)] -pub(crate) enum StateRootMessage { +pub enum StateRootMessage { /// New state update from transaction execution StateUpdate(EvmState), /// Proof calculation completed for a specific state update @@ -223,7 +223,7 @@ fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostState { /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. #[derive(Debug)] -pub(crate) struct StateRootTask { +pub struct StateRootTask { /// Task configuration. config: StateRootConfig, /// Receiver for state root related messages. 
@@ -250,7 +250,7 @@ where + 'static, { /// Creates a new state root task with the unified message channel - pub(crate) fn new(config: StateRootConfig) -> Self { + pub fn new(config: StateRootConfig) -> Self { let (tx, rx) = channel(); Self { @@ -264,7 +264,7 @@ where } /// Spawns the state root task and returns a handle to await its result. - pub(crate) fn spawn(self) -> StateRootHandle { + pub fn spawn(self) -> StateRootHandle { let (tx, rx) = mpsc::sync_channel(1); std::thread::Builder::new() .name("State Root Task".to_string()) @@ -279,7 +279,7 @@ where } /// Returns a state hook to be used to send state updates to this task. - pub(crate) fn state_hook(&self) -> impl OnStateHook { + pub fn state_hook(&self) -> impl OnStateHook { let state_hook = StateHookSender::new(self.tx.clone()); move |state: &EvmState| { From bf1688525e08cb40fe74e072a9f487ff973124b3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 21:36:07 +0100 Subject: [PATCH 38/70] chore: add blob_versioned_hashes_iter to block body (#13246) --- crates/primitives-traits/src/block/body.rs | 5 +++++ crates/primitives/src/block.rs | 21 ++++++--------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 14436ee01c58..ed60796ce1bc 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -81,6 +81,11 @@ pub trait BlockBody: .sum() } + /// Returns an iterator over all blob versioned hashes in the block body. + fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.transactions().iter().filter_map(|tx| tx.blob_versioned_hashes()).flatten() + } + /// Returns an iterator over the encoded 2718 transactions. /// /// This is also known as `raw transactions`. 
diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index b02456f6c486..0ee6f860b58a 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -263,12 +263,6 @@ impl SealedBlock { } impl SealedBlock { - /// Returns an iterator over all blob transactions of the block - #[inline] - pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.body.blob_transactions_iter() - } - /// Returns whether or not the block contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -280,19 +274,16 @@ impl SealedBlock { pub fn has_eip7702_transactions(&self) -> bool { self.body.has_eip7702_transactions() } +} - /// Returns only the blob transactions, if any, from the block body. - #[inline] - pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { - self.blob_transactions_iter().collect() - } - +impl SealedBlock +where + B: reth_primitives_traits::BlockBody, +{ /// Returns an iterator over all blob versioned hashes from the block body. 
#[inline] pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.blob_transactions_iter() - .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) - .flatten() + self.body.blob_versioned_hashes_iter() } } From af5dc60867236d01fc07554ad08408d5fc894921 Mon Sep 17 00:00:00 2001 From: angel-ding-cb <141944320+angel-ding-cb@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:43:10 -0800 Subject: [PATCH 39/70] Return a propoer error code for txpool is full error (#13245) --- crates/rpc/rpc-eth-types/src/error/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 187e2d943f70..aeea8ea5b894 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -676,6 +676,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: RpcPoolError) -> Self { match error { RpcPoolError::Invalid(err) => err.into(), + RpcPoolError::TxPoolOverflow => { + rpc_error_with_code(EthRpcErrorCode::TransactionRejected.code(), error.to_string()) + } error => internal_rpc_err(error.to_string()), } } From eb6080863bbc41e5e108de3b8f07ff52db33bc1e Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 01:48:29 +0400 Subject: [PATCH 40/70] feat(rpc): relax `VaidationApi` and `EngineApi` (#13241) --- Cargo.lock | 3 +- crates/engine/primitives/src/lib.rs | 37 ++++--- crates/ethereum/engine-primitives/src/lib.rs | 24 ++--- crates/ethereum/node/src/node.rs | 14 ++- crates/node/builder/src/rpc.rs | 34 ++++--- crates/optimism/node/src/engine.rs | 24 ++--- crates/optimism/node/src/node.rs | 30 ++++-- crates/rpc/rpc-builder/Cargo.toml | 2 - crates/rpc/rpc-builder/src/eth.rs | 3 +- crates/rpc/rpc-builder/src/lib.rs | 98 ++++++++++++------- crates/rpc/rpc-builder/tests/it/middleware.rs | 3 + crates/rpc/rpc-builder/tests/it/startup.rs | 9 +- crates/rpc/rpc-builder/tests/it/utils.rs | 21 +++- 
crates/rpc/rpc-engine-api/src/engine_api.rs | 24 ++--- crates/rpc/rpc-engine-api/tests/it/payload.rs | 2 +- .../rpc-types-compat/src/engine/payload.rs | 13 ++- crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/validation.rs | 65 ++++++------ examples/custom-engine-types/src/main.rs | 30 +++--- examples/rpc-db/src/main.rs | 12 ++- 20 files changed, 258 insertions(+), 192 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 58982032e30f..eabf3db01a72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8939,6 +8939,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-consensus-common", + "reth-engine-primitives", "reth-errors", "reth-ethereum-consensus", "reth-evm", @@ -8946,7 +8947,6 @@ dependencies = [ "reth-network-api", "reth-network-peers", "reth-network-types", - "reth-payload-validator", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -9020,7 +9020,6 @@ dependencies = [ name = "reth-rpc-builder" version = "1.1.2" dependencies = [ - "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 89fb7459b7de..2bd642cfa208 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -10,6 +10,8 @@ mod error; +use core::fmt; + use alloy_consensus::BlockHeader; use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use error::BeaconOnNewPayloadError; @@ -80,11 +82,28 @@ pub trait EngineTypes: + 'static; } -/// Type that validates the payloads processed by the engine. -pub trait EngineValidator: Clone + Send + Sync + Unpin + 'static { +/// Type that validates an [`ExecutionPayload`]. +pub trait PayloadValidator: fmt::Debug + Send + Sync + Unpin + 'static { /// The block type used by the engine. type Block: Block; + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. 
+ /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError>; +} + +/// Type that validates the payloads processed by the engine. +pub trait EngineValidator: PayloadValidator { /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. fn validate_version_specific_fields( @@ -100,20 +119,6 @@ pub trait EngineValidator: Clone + Send + Sync + Unpin + 'st attributes: &::PayloadAttributes, ) -> Result<(), EngineObjectValidationError>; - /// Ensures that the given payload does not violate any consensus rules that concern the block's - /// layout. - /// - /// This function must convert the payload into the executable block and pre-validate its - /// fields. - /// - /// Implementers should ensure that the checks are done in the order that conforms with the - /// engine-API specification. - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError>; - /// Validates the payload attributes with respect to the header. 
/// /// By default, this enforces that the payload attributes timestamp is greater than the diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index beefd54ca05b..59c870f4d288 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -18,7 +18,7 @@ pub use alloy_rpc_types_engine::{ }; pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_chainspec::ChainSpec; -use reth_engine_primitives::{EngineTypes, EngineValidator}; +use reth_engine_primitives::{EngineTypes, EngineValidator, PayloadValidator}; use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, @@ -82,12 +82,22 @@ impl EthereumEngineValidator { } } +impl PayloadValidator for EthereumEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + impl EngineValidator for EthereumEngineValidator where Types: EngineTypes, { - type Block = Block; - fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -103,12 +113,4 @@ where ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, attributes.into()) } - - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - self.inner.ensure_well_formed_payload(payload, sidecar) - } } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index b2fc7e677aca..54707e69b26b 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -6,14 +6,13 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_beacon_consensus::EthBeaconConsensus; use 
reth_chainspec::ChainSpec; use reth_ethereum_engine_primitives::{ - EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, EthereumEngineValidator, + EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, HeaderTy, NodeTypesWithDB, - TxTy, + AddOnsContext, ConfigureEvm, FullNodeComponents, HeaderTy, NodeTypesWithDB, TxTy, }; use reth_node_builder::{ components::{ @@ -37,6 +36,8 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; +pub use reth_ethereum_engine_primitives::EthereumEngineValidator; + /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -353,9 +354,12 @@ pub struct EthereumEngineValidatorBuilder; impl EngineValidatorBuilder for EthereumEngineValidatorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + ChainSpec = ChainSpec, + Engine = EthEngineTypes, + Primitives = EthPrimitives, + >, Node: FullNodeComponents, - EthereumEngineValidator: EngineValidator, { type Validator = EthereumEngineValidator; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index e6c9ad233568..c8e08078bb98 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -10,7 +10,7 @@ use std::{ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ - AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodePrimitives, NodeTypes, + AddOnsContext, BlockTy, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, }; use reth_node_core::{ @@ -33,6 +33,7 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use 
reth_tracing::tracing::{debug, info}; +use std::sync::Arc; use crate::EthApiBuilderCtx; @@ -402,15 +403,7 @@ where impl RpcAddOns where - N: FullNodeComponents< - Types: ProviderNodeTypes< - Primitives: NodePrimitives< - Block = reth_primitives::Block, - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, - >, + N: FullNodeComponents, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners @@ -449,7 +442,7 @@ where Box::new(node.task_executor().clone()), client, EngineCapabilities::default(), - engine_validator, + engine_validator.clone(), ); info!(target: "reth::cli", "Engine API handler initialized"); @@ -466,7 +459,12 @@ where .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) .with_consensus(node.consensus().clone()) - .build_with_auth_server(module_config, engine_api, eth_api_builder); + .build_with_auth_server( + module_config, + engine_api, + eth_api_builder, + Arc::new(engine_validator), + ); // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { @@ -588,7 +586,8 @@ impl>> EthApi /// Helper trait that provides the validator for the engine API pub trait EngineValidatorAddOn: Send { /// The Validator type to use for the engine API. - type Validator: EngineValidator<::Engine>; + type Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone; /// Creates the engine validator for an engine API based node. fn engine_validator( @@ -613,7 +612,8 @@ where /// A type that knows how to build the engine validator. pub trait EngineValidatorBuilder: Send + Sync + Clone { /// The consensus implementation to build. - type Validator: EngineValidator<::Engine>; + type Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone; /// Creates the engine validator. 
fn build( @@ -625,8 +625,10 @@ pub trait EngineValidatorBuilder: Send + Sync + Clone impl EngineValidatorBuilder for F where Node: FullNodeComponents, - Validator: - EngineValidator<::Engine> + Clone + Unpin + 'static, + Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone + + Unpin + + 'static, F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, Fut: Future> + Send, { diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 063ac3617af5..1db50b72ee80 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -12,7 +12,7 @@ use reth_node_api::{ EngineObjectValidationError, MessageValidationKind, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, EngineTypes, EngineValidator, + validate_version_specific_fields, EngineTypes, EngineValidator, PayloadValidator, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::{OpHardfork, OpHardforks}; @@ -77,12 +77,22 @@ impl OpEngineValidator { } } +impl PayloadValidator for OpEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + impl EngineValidator for OpEngineValidator where Types: EngineTypes, { - type Block = Block; - fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -136,14 +146,6 @@ where Ok(()) } - - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError> { - self.inner.ensure_well_formed_payload(payload, sidecar) - } } /// Validates the presence of the `withdrawals` field according to the payload timestamp. 
diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 35e33ccd75a4..54ff36dabac0 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -238,7 +238,12 @@ impl>> OpAddOn impl NodeAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + Engine = OpEngineTypes, + >, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -283,7 +288,12 @@ where impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + Engine = OpEngineTypes, + >, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -296,8 +306,13 @@ where impl EngineValidatorAddOn for OpAddOns where - N: FullNodeComponents>, - OpEngineValidator: EngineValidator<::Engine>, + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Engine = OpEngineTypes, + >, + >, { type Validator = OpEngineValidator; @@ -674,9 +689,12 @@ pub struct OpEngineValidatorBuilder; impl EngineValidatorBuilder for OpEngineValidatorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Engine = OpEngineTypes, + >, Node: FullNodeComponents, - OpEngineValidator: EngineValidator, { type Validator = OpEngineValidator; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index a0712d617b66..7dbbe7608a72 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -31,8 +31,6 @@ reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true -alloy-consensus.workspace = true - # rpc/net jsonrpsee = { workspace = true, features = ["server"] } tower-http = { workspace = true, features = ["full"] } diff --git 
a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 2a6744e7b18e..7339c7089e59 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,4 +1,3 @@ -use alloy_consensus::Header; use reth_evm::ConfigureEvm; use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; @@ -62,7 +61,7 @@ where >, ) -> Self where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Tasks: TaskSpawner + Clone + 'static, { let cache = EthStateCache::spawn_with(provider.clone(), config.cache, executor.clone()); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 1220020504b0..ce29b77f09d6 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,10 +16,10 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` -//! use alloy_consensus::Header; +//! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::TransactionSigned; +//! use reth_primitives::{Header, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -27,8 +27,18 @@ //! }; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{PoolTransaction, TransactionPool}; +//! use std::sync::Arc; //! -//! pub async fn launch( +//! pub async fn launch< +//! Provider, +//! Pool, +//! Network, +//! Events, +//! EvmConfig, +//! BlockExecutor, +//! Consensus, +//! Validator, +//! >( //! provider: Provider, //! pool: Pool, //! network: Network, @@ -36,6 +46,7 @@ //! evm_config: EvmConfig, //! block_executor: BlockExecutor, //! consensus: Consensus, +//! validator: Validator, //! ) where //! Provider: FullRpcProvider< //! Transaction = TransactionSigned, @@ -53,6 +64,7 @@ //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -71,7 +83,7 @@ //! block_executor, //! consensus, //! ) -//! .build(transports, Box::new(EthApi::with_spawner)); +//! .build(transports, Box::new(EthApi::with_spawner), Arc::new(validator)); //! let handle = RpcServerConfig::default() //! .with_http(ServerBuilder::default()) //! .start(&transport_modules) @@ -83,11 +95,10 @@ //! //! //! ``` -//! use alloy_consensus::Header; -//! use reth_engine_primitives::EngineTypes; +//! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::TransactionSigned; +//! use reth_primitives::{Header, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -98,6 +109,7 @@ //! use reth_rpc_layer::JwtSecret; //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::{PoolTransaction, TransactionPool}; +//! use std::sync::Arc; //! use tokio::try_join; //! //! pub async fn launch< @@ -110,6 +122,7 @@ //! EvmConfig, //! BlockExecutor, //! Consensus, +//! Validator, //! >( //! provider: Provider, //! pool: Pool, @@ -119,6 +132,7 @@ //! evm_config: EvmConfig, //! block_executor: BlockExecutor, //! consensus: Consensus, +//! validator: Validator, //! ) where //! Provider: FullRpcProvider< //! Transaction = TransactionSigned, @@ -138,6 +152,7 @@ //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -158,8 +173,12 @@ //! ); //! //! // configure the server modules -//! let (modules, auth_module, _registry) = -//! builder.build_with_auth_server(transports, engine_api, Box::new(EthApi::with_spawner)); +//! let (modules, auth_module, _registry) = builder.build_with_auth_server( +//! transports, +//! engine_api, +//! Box::new(EthApi::with_spawner), +//! Arc::new(validator), +//! ); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); @@ -187,7 +206,6 @@ use std::{ }; use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; -use alloy_consensus::Header; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -201,7 +219,7 @@ use jsonrpsee::{ }; use reth_chainspec::EthereumHardforks; use reth_consensus::FullConsensus; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives::NodePrimitives; @@ -274,6 +292,7 @@ pub async fn launch, block_executor: BlockExecutor, consensus: Arc>, + payload_validator: Arc>, ) -> Result where Provider: FullRpcProvider< @@ -297,12 +316,7 @@ where Header = ::BlockHeader, >, >, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + BlockExecutor: BlockExecutorProvider, { let module_config = module_config.into(); server_config @@ -318,7 +332,7 @@ where block_executor, consensus, ) - .build(module_config, eth), + 
.build(module_config, eth, payload_validator), ) .await } @@ -651,6 +665,7 @@ where Provider: FullRpcProvider< Block = ::Block, Receipt = ::Receipt, + Header = ::BlockHeader, > + AccountReader + ChangeSetReader, Pool: TransactionPool + 'static, @@ -661,12 +676,7 @@ where Header = ::BlockHeader, Transaction = ::SignedTx, >, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can @@ -681,6 +691,7 @@ where module_config: TransportRpcModuleConfig, engine: EngineApi, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> ( TransportRpcModules, AuthRpcModule, @@ -721,6 +732,7 @@ where evm_config, eth, block_executor, + payload_validator, ); let modules = registry.create_transport_rpc_modules(module_config); @@ -738,21 +750,24 @@ where /// # Example /// /// ```no_run - /// use alloy_consensus::Header; /// use reth_consensus::noop::NoopConsensus; + /// use reth_engine_primitives::PayloadValidator; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; - /// use reth_primitives::TransactionSigned; + /// use reth_primitives::{Header, TransactionSigned}; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; + /// use std::sync::Arc; /// - /// fn init + 'static>( - /// evm: Evm, - /// ) { + /// fn init(evm: Evm, validator: Validator) + /// where + /// Evm: ConfigureEvm
+ 'static, + /// Validator: PayloadValidator + 'static, + /// { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) /// .with_pool(NoopTransactionPool::default()) @@ -762,7 +777,7 @@ where /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) /// .with_consensus(NoopConsensus::default()) - /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); + /// .into_registry(Default::default(), Box::new(EthApi::with_spawner), Arc::new(validator)); /// /// let eth_api = registry.eth_api(); /// } @@ -771,6 +786,7 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, @@ -796,6 +812,7 @@ where evm_config, eth, block_executor, + payload_validator, ) } @@ -805,6 +822,7 @@ where self, module_config: TransportRpcModuleConfig, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> TransportRpcModules<()> where EthApi: FullEthApiServer< @@ -843,6 +861,7 @@ where evm_config, eth, block_executor, + payload_validator, ); modules.config = module_config; @@ -957,6 +976,7 @@ pub struct RpcRegistryInner< events: Events, block_executor: BlockExecutor, consensus: Consensus, + payload_validator: Arc>, /// Holds the configuration for the RPC modules config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers @@ -1008,9 +1028,10 @@ where EthApi, >, block_executor: BlockExecutor, + payload_validator: Arc>, ) -> Self where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests); @@ -1037,6 +1058,7 @@ where blocking_pool_guard, events, block_executor, + payload_validator, } } } @@ -1320,6 +1342,7 @@ where pub fn validation_api(&self) -> ValidationApi where Consensus: reth_consensus::FullConsensus + Clone + 'static, + Provider: BlockReader::Block>, { ValidationApi::new( self.provider.clone(), @@ -1327,6 +1350,7 @@ where self.block_executor.clone(), self.config.flashbots.clone(), Box::new(self.executor.clone()), + self.payload_validator.clone(), ) } } @@ -1334,7 +1358,9 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider::Block> + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, @@ -1346,12 +1372,7 @@ where Header = ::BlockHeader, >, >, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the @@ -1500,6 +1521,7 @@ where self.block_executor.clone(), self.config.flashbots.clone(), Box::new(self.executor.clone()), + self.payload_validator.clone(), ) .into_rpc() .into(), diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 96d818ed4f94..0e0bb80c08b9 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -5,6 +5,8 @@ use jsonrpsee::{ types::Request, MethodResponse, }; +use reth_chainspec::MAINNET; +use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; @@ -63,6 +65,7 @@ async fn 
test_rpc_middleware() { let modules = builder.build( TransportRpcModuleConfig::set_http(RpcModuleSelection::All), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let mylayer = MyMiddlewareLayer::default(); diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index 9f6961fbba0d..ac53b014956a 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -1,7 +1,9 @@ //! Startup tests -use std::io; +use std::{io, sync::Arc}; +use reth_chainspec::MAINNET; +use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{ error::{RpcError, ServerKind, WsHttpSamePortError}, @@ -30,6 +32,7 @@ async fn test_http_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; @@ -45,6 +48,7 @@ async fn test_ws_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); @@ -66,6 +70,7 @@ async fn test_launch_same_port_different_modules() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -88,6 +93,7 @@ async fn test_launch_same_port_same_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + 
Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -108,6 +114,7 @@ async fn test_launch_same_port_different_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 175992c0f14b..be708dac5f89 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,4 +1,7 @@ -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, +}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; @@ -61,8 +64,11 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { /// Launches a new server with http only with the given modules pub async fn launch_http(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = - builder.build(TransportRpcModuleConfig::set_http(modules), Box::new(EthApi::with_spawner)); + let server = builder.build( + TransportRpcModuleConfig::set_http(modules), + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), + ); RpcServerConfig::http(Default::default()) .with_http_address(test_address()) .start(&server) @@ -73,8 +79,11 @@ pub async fn launch_http(modules: impl Into) -> RpcServerHan /// Launches a new server with ws only with the given modules pub async fn launch_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules), Box::new(EthApi::with_spawner)); + let server = builder.build( + 
TransportRpcModuleConfig::set_ws(modules), + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), + ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) .start(&server) @@ -89,6 +98,7 @@ pub async fn launch_http_ws(modules: impl Into) -> RpcServer let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) @@ -107,6 +117,7 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8b57cb1f19ee..2e80c105e7e6 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -25,7 +25,7 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{Block, EthereumHardfork}; +use reth_primitives::EthereumHardfork; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -80,11 +80,7 @@ struct EngineApiInner EngineApi where - Provider: HeaderProvider - + BlockReader - + StateProviderFactory - + EvmEnvProvider - + 'static, + Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -573,7 +569,7 @@ where f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) 
-> R + Send + 'static, R: Send + 'static, { let len = hashes.len() as u64; @@ -748,11 +744,7 @@ where impl EngineApiServer for EngineApi where - Provider: HeaderProvider - + BlockReader - + StateProviderFactory - + EvmEnvProvider - + 'static, + Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -1045,7 +1037,7 @@ mod tests { use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::SealedBlock; + use reth_primitives::{Block, SealedBlock}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; @@ -1171,7 +1163,7 @@ mod tests { let expected = blocks .iter() .cloned() - .map(|b| Some(convert_to_payload_body_v1(b.unseal()))) + .map(|b| Some(convert_to_payload_body_v1(b.unseal::()))) .collect::>(); let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); @@ -1213,7 +1205,7 @@ mod tests { if first_missing_range.contains(&b.number) { None } else { - Some(convert_to_payload_body_v1(b.unseal())) + Some(convert_to_payload_body_v1(b.unseal::())) } }) .collect::>(); @@ -1232,7 +1224,7 @@ mod tests { { None } else { - Some(convert_to_payload_body_v1(b.unseal())) + Some(convert_to_payload_body_v1(b.unseal::())) } }) .collect::>(); diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 78b0351d4a5c..363c816d240c 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -38,7 +38,7 @@ fn payload_body_roundtrip() { 0..=99, BlockRangeParams { tx_count: 0..2, ..Default::default() }, ) { - let unsealed = block.clone().unseal(); + let unsealed = 
block.clone().unseal::(); let payload_body: ExecutionPayloadBodyV1 = convert_to_payload_body_v1(unsealed); assert_eq!( diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index f504d57addc7..3be7835a35ae 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -16,6 +16,7 @@ use reth_primitives::{ proofs::{self}, Block, BlockBody, BlockExt, SealedBlock, TransactionSigned, }; +use reth_primitives_traits::BlockBody as _; /// Converts [`ExecutionPayloadV1`] to [`Block`] pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result { @@ -320,15 +321,13 @@ pub fn validate_block_hash( } /// Converts [`Block`] to [`ExecutionPayloadBodyV1`] -pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { - let transactions = value.body.transactions.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_2718(&mut out); - out.into() - }); +pub fn convert_to_payload_body_v1( + value: impl reth_primitives_traits::Block, +) -> ExecutionPayloadBodyV1 { + let transactions = value.body().transactions().iter().map(|tx| tx.encoded_2718().into()); ExecutionPayloadBodyV1 { transactions: transactions.collect(), - withdrawals: value.body.withdrawals.map(Withdrawals::into_inner), + withdrawals: value.body().withdrawals().cloned().map(Withdrawals::into_inner), } } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 5efae46f0061..14519860e76e 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,6 +18,7 @@ reth-primitives = { workspace = true, features = ["secp256k1"] } reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true +reth-engine-primitives.workspace = true reth-errors.workspace = true reth-ethereum-consensus.workspace = true reth-provider.workspace = true @@ -35,7 +36,6 @@ reth-rpc-eth-types.workspace = true 
reth-rpc-server-types.workspace = true reth-network-types.workspace = true reth-consensus.workspace = true -reth-payload-validator.workspace = true # ethereum alloy-consensus.workspace = true diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index b13e99eb21c3..a7042126cba5 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -14,10 +14,10 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; +use reth_engine_primitives::PayloadValidator; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_primitives_traits::{Block as _, BlockBody}; use reth_provider::{ @@ -34,14 +34,13 @@ use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait #[derive(Clone, Debug, derive_more::Deref)] -pub struct ValidationApi { +pub struct ValidationApi { #[deref] inner: Arc>, } impl ValidationApi where - Provider: ChainSpecProvider, E: BlockExecutorProvider, { /// Create a new instance of the [`ValidationApi`] @@ -51,10 +50,12 @@ where executor_provider: E, config: ValidationApiConfig, task_spawner: Box, + payload_validator: Arc< + dyn PayloadValidator::Block>, + >, ) -> Self { let ValidationApiConfig { disallow } = config; - let payload_validator = ExecutionPayloadValidator::new(provider.chain_spec()); let inner = Arc::new(ValidationApiInner { provider, consensus, @@ -91,16 +92,11 @@ where impl ValidationApi where - Provider: BlockReaderIdExt
+ Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = Provider::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( @@ -116,8 +112,8 @@ where self.consensus.validate_block_pre_execution(&block)?; if !self.disallow.is_empty() { - if self.disallow.contains(&block.beneficiary) { - return Err(ValidationApiError::Blacklist(block.beneficiary)) + if self.disallow.contains(&block.beneficiary()) { + return Err(ValidationApiError::Blacklist(block.beneficiary())) } if self.disallow.contains(&message.proposer_fee_recipient) { return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) @@ -137,9 +133,9 @@ where let latest_header = self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; - if latest_header.hash() != block.header.parent_hash { + if latest_header.hash() != block.header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: block.header.parent_hash, expected: latest_header.hash() } + GotExpected { got: block.header.parent_hash(), expected: latest_header.hash() } .into(), ) .into()) @@ -200,7 +196,7 @@ where /// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeader`]. 
fn validate_message_against_header( &self, - header: &SealedHeader, + header: &SealedHeader<::BlockHeader>, message: &BidTrace, ) -> Result<(), ValidationApiError> { if header.hash() != message.block_hash { @@ -208,20 +204,20 @@ where got: message.block_hash, expected: header.hash(), })) - } else if header.parent_hash != message.parent_hash { + } else if header.parent_hash() != message.parent_hash { Err(ValidationApiError::ParentHashMismatch(GotExpected { got: message.parent_hash, - expected: header.parent_hash, + expected: header.parent_hash(), })) - } else if header.gas_limit != message.gas_limit { + } else if header.gas_limit() != message.gas_limit { Err(ValidationApiError::GasLimitMismatch(GotExpected { got: message.gas_limit, - expected: header.gas_limit, + expected: header.gas_limit(), })) - } else if header.gas_used != message.gas_used { + } else if header.gas_used() != message.gas_used { return Err(ValidationApiError::GasUsedMismatch(GotExpected { got: message.gas_used, - expected: header.gas_used, + expected: header.gas_used(), })) } else { Ok(()) @@ -235,20 +231,20 @@ where fn validate_gas_limit( &self, registered_gas_limit: u64, - parent_header: &SealedHeader, - header: &SealedHeader, + parent_header: &SealedHeader<::BlockHeader>, + header: &SealedHeader<::BlockHeader>, ) -> Result<(), ValidationApiError> { let max_gas_limit = - parent_header.gas_limit + parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR - 1; + parent_header.gas_limit() + parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR - 1; let min_gas_limit = - parent_header.gas_limit - parent_header.gas_limit / GAS_LIMIT_BOUND_DIVISOR + 1; + parent_header.gas_limit() - parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR + 1; let best_gas_limit = std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit)); - if best_gas_limit != header.gas_limit { + if best_gas_limit != header.gas_limit() { return Err(ValidationApiError::GasLimitMismatch(GotExpected { - got: header.gas_limit, + 
got: header.gas_limit(), expected: best_gas_limit, })) } @@ -409,17 +405,12 @@ where #[async_trait] impl BlockSubmissionValidationApiServer for ValidationApi where - Provider: BlockReaderIdExt
+ Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider + StateProviderFactory + Clone + 'static, - E: BlockExecutorProvider< - Primitives: NodePrimitives< - BlockHeader = Provider::Header, - BlockBody = reth_primitives::BlockBody, - >, - >, + E: BlockExecutorProvider, { async fn validate_builder_submission_v1( &self, @@ -473,13 +464,13 @@ where } #[derive(Debug)] -pub struct ValidationApiInner { +pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. consensus: Arc>, /// Execution payload validator. - payload_validator: ExecutionPayloadValidator, + payload_validator: Arc::Block>>, /// Block executor factory. executor_provider: E, /// Set of disallowed addresses diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 1034effebf84..f30956d8f5cf 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -55,7 +55,7 @@ use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, - FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, + FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, PayloadValidator, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ @@ -189,12 +189,22 @@ impl CustomEngineValidator { } } +impl PayloadValidator for CustomEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + impl EngineValidator for CustomEngineValidator where T: EngineTypes, { - type Block = Block; - fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, @@ -220,14 +230,6 @@ 
where Ok(()) } - fn ensure_well_formed_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError> { - self.inner.ensure_well_formed_payload(payload, sidecar) - } - fn validate_payload_attributes_against_header( &self, _attr: &::PayloadAttributes, @@ -246,7 +248,11 @@ pub struct CustomEngineValidatorBuilder; impl EngineValidatorBuilder for CustomEngineValidatorBuilder where N: FullNodeComponents< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, >, { type Validator = CustomEngineValidator; diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 92ae86f00bb9..cde891036e6a 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -34,7 +34,9 @@ use reth::rpc::builder::{ // Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor}; -use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_node_ethereum::{ + node::EthereumEngineValidator, EthEvmConfig, EthExecutorProvider, EthereumNode, +}; use reth_provider::{test_utils::TestCanonStateSubscriptions, ChainSpecProvider}; // Custom rpc extension @@ -70,11 +72,15 @@ async fn main() -> eyre::Result<()> { .with_evm_config(EthEvmConfig::new(spec.clone())) .with_events(TestCanonStateSubscriptions::default()) .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())) - .with_consensus(EthBeaconConsensus::new(spec)); + .with_consensus(EthBeaconConsensus::new(spec.clone())); // Pick which namespaces to expose. 
let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let mut server = rpc_builder.build(config, Box::new(EthApi::with_spawner)); + let mut server = rpc_builder.build( + config, + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(spec)), + ); // Add a custom rpc namespace let custom_rpc = MyRpcExt { provider }; From 386e4b3ebd6ac87528e32bc5789b8c35d2b5f9f0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 9 Dec 2024 23:19:50 +0100 Subject: [PATCH 41/70] feat: extract zstd compressors (#13250) --- Cargo.lock | 9 +- Cargo.toml | 2 + crates/primitives/Cargo.toml | 7 +- crates/primitives/src/lib.rs | 4 - crates/primitives/src/receipt.rs | 4 +- crates/primitives/src/transaction/mod.rs | 8 +- crates/storage/zstd-compressors/Cargo.toml | 19 ++++ .../zstd-compressors}/receipt_dictionary.bin | Bin .../zstd-compressors/src/lib.rs} | 82 +++++++++++------- .../transaction_dictionary.bin | Bin 10 files changed, 90 insertions(+), 45 deletions(-) create mode 100644 crates/storage/zstd-compressors/Cargo.toml rename crates/{primitives/src/compression => storage/zstd-compressors}/receipt_dictionary.bin (100%) rename crates/{primitives/src/compression/mod.rs => storage/zstd-compressors/src/lib.rs} (62%) rename crates/{primitives/src/compression => storage/zstd-compressors}/transaction_dictionary.bin (100%) diff --git a/Cargo.lock b/Cargo.lock index eabf3db01a72..3de11a253bd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8743,6 +8743,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-trie-common", + "reth-zstd-compressors", "revm-primitives", "rstest", "secp256k1", @@ -8750,7 +8751,6 @@ dependencies = [ "serde_json", "serde_with", "test-fuzz", - "zstd", ] [[package]] @@ -9646,6 +9646,13 @@ dependencies = [ "thiserror 2.0.5", ] +[[package]] +name = "reth-zstd-compressors" +version = "1.1.2" +dependencies = [ + "zstd", +] + [[package]] name = "revm" version = "18.0.0" diff --git a/Cargo.toml b/Cargo.toml 
index 142b00290b9a..beef7d330dd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,6 +118,7 @@ members = [ "crates/storage/nippy-jar/", "crates/storage/provider/", "crates/storage/storage-api/", + "crates/storage/zstd-compressors/", "crates/tasks/", "crates/tokio-util/", "crates/tracing/", @@ -422,6 +423,7 @@ reth-trie-common = { path = "crates/trie/common" } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } reth-trie-sparse = { path = "crates/trie/sparse" } +reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-features = false } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 2f8f37bcd356..109b20ec2bcc 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -18,6 +18,7 @@ reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } +reth-zstd-compressors = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true @@ -55,7 +56,6 @@ rand = { workspace = true, optional = true } rayon.workspace = true serde.workspace = true serde_with = { workspace = true, optional = true } -zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -108,11 +108,12 @@ std = [ "alloy-rlp/std", "reth-ethereum-forks/std", "bytes/std", - "derive_more/std" + "derive_more/std", + "reth-zstd-compressors?/std" ] reth-codec = [ "dep:reth-codecs", - "dep:zstd", + "dep:reth-zstd-compressors", "dep:modular-bitfield", "std", "reth-primitives-traits/reth-codec", ] diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index edbc73a9362d..18fe1498b8a8 100644 --- a/crates/primitives/src/lib.rs +++ 
b/crates/primitives/src/lib.rs @@ -27,8 +27,6 @@ pub use traits::*; #[cfg(feature = "alloy-compat")] mod alloy_compat; mod block; -#[cfg(feature = "reth-codec")] -mod compression; pub mod proofs; mod receipt; pub use reth_static_file_types as static_file; @@ -38,8 +36,6 @@ pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, }; -#[cfg(feature = "reth-codec")] -pub use compression::*; pub use receipt::{gas_spent_by_transactions, Receipt, Receipts}; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 419c36c2080b..62c664e22a46 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -12,9 +12,9 @@ use derive_more::{DerefMut, From, IntoIterator}; use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; -#[cfg(feature = "reth-codec")] -use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; use crate::TxType; +#[cfg(feature = "reth-codec")] +use reth_zstd_compressors::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
pub use reth_primitives_traits::receipt::gas_spent_by_transactions; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 670ee7f352ef..b64cf094042e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1365,14 +1365,14 @@ impl reth_codecs::Compact for TransactionSigned { let tx_bits = if zstd_bit { let mut tmp = Vec::with_capacity(256); if cfg!(feature = "std") { - crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { + reth_zstd_compressors::TRANSACTION_COMPRESSOR.with(|compressor| { let mut compressor = compressor.borrow_mut(); let tx_bits = self.transaction.to_compact(&mut tmp); buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); tx_bits as u8 }) } else { - let mut compressor = crate::compression::create_tx_compressor(); + let mut compressor = reth_zstd_compressors::create_tx_compressor(); let tx_bits = self.transaction.to_compact(&mut tmp); buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); tx_bits as u8 @@ -1399,7 +1399,7 @@ impl reth_codecs::Compact for TransactionSigned { let zstd_bit = bitflags >> 3; let (transaction, buf) = if zstd_bit != 0 { if cfg!(feature = "std") { - crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { + reth_zstd_compressors::TRANSACTION_DECOMPRESSOR.with(|decompressor| { let mut decompressor = decompressor.borrow_mut(); // TODO: enforce that zstd is only present at a "top" level type @@ -1411,7 +1411,7 @@ impl reth_codecs::Compact for TransactionSigned { (transaction, buf) }) } else { - let mut decompressor = crate::compression::create_tx_decompressor(); + let mut decompressor = reth_zstd_compressors::create_tx_decompressor(); let transaction_type = (bitflags & 0b110) >> 1; let (transaction, _) = Transaction::from_compact(decompressor.decompress(buf), transaction_type); diff --git a/crates/storage/zstd-compressors/Cargo.toml b/crates/storage/zstd-compressors/Cargo.toml 
new file mode 100644 index 000000000000..357684f32fc1 --- /dev/null +++ b/crates/storage/zstd-compressors/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "reth-zstd-compressors" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +description = "Commonly used zstd compressors." + +[lints] +workspace = true + +[dependencies] +zstd = { workspace = true, features = ["experimental"] } + +[features] +default = ["std"] +std = [] \ No newline at end of file diff --git a/crates/primitives/src/compression/receipt_dictionary.bin b/crates/storage/zstd-compressors/receipt_dictionary.bin similarity index 100% rename from crates/primitives/src/compression/receipt_dictionary.bin rename to crates/storage/zstd-compressors/receipt_dictionary.bin diff --git a/crates/primitives/src/compression/mod.rs b/crates/storage/zstd-compressors/src/lib.rs similarity index 62% rename from crates/primitives/src/compression/mod.rs rename to crates/storage/zstd-compressors/src/lib.rs index ecceafc20682..d5167120bc76 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/storage/zstd-compressors/src/lib.rs @@ -1,41 +1,61 @@ +//! Commonly used zstd [`Compressor`] and [`Decompressor`] for reth types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use crate::alloc::string::ToString; use alloc::vec::Vec; -use core::cell::RefCell; use zstd::bulk::{Compressor, Decompressor}; /// Compression/Decompression dictionary for `Receipt`. 
-pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("./receipt_dictionary.bin"); +pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("../receipt_dictionary.bin"); /// Compression/Decompression dictionary for `Transaction`. -pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("./transaction_dictionary.bin"); +pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("../transaction_dictionary.bin"); -// We use `thread_local` compressors and decompressors because dictionaries can be quite big, and -// zstd-rs recommends to use one context/compressor per thread #[cfg(feature = "std")] -std::thread_local! { - /// Thread Transaction compressor. - pub static TRANSACTION_COMPRESSOR: RefCell> = RefCell::new( - Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) - .expect("failed to initialize transaction compressor"), - ); - - /// Thread Transaction decompressor. - pub static TRANSACTION_DECOMPRESSOR: RefCell = - RefCell::new(ReusableDecompressor::new( - Decompressor::with_dictionary(TRANSACTION_DICTIONARY) - .expect("failed to initialize transaction decompressor"), - )); - - /// Thread receipt compressor. - pub static RECEIPT_COMPRESSOR: RefCell> = RefCell::new( - Compressor::with_dictionary(0, RECEIPT_DICTIONARY) - .expect("failed to initialize receipt compressor"), - ); - - /// Thread receipt decompressor. - pub static RECEIPT_DECOMPRESSOR: RefCell = - RefCell::new(ReusableDecompressor::new( - Decompressor::with_dictionary(RECEIPT_DICTIONARY) - .expect("failed to initialize receipt decompressor"), - )); +pub use locals::*; +#[cfg(feature = "std")] +mod locals { + use super::*; + use core::cell::RefCell; + + // We use `thread_local` compressors and decompressors because dictionaries can be quite big, + // and zstd-rs recommends to use one context/compressor per thread + std::thread_local! { + /// Thread Transaction compressor. 
+ pub static TRANSACTION_COMPRESSOR: RefCell> = RefCell::new( + Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) + .expect("failed to initialize transaction compressor"), + ); + + /// Thread Transaction decompressor. + pub static TRANSACTION_DECOMPRESSOR: RefCell = + RefCell::new(ReusableDecompressor::new( + Decompressor::with_dictionary(TRANSACTION_DICTIONARY) + .expect("failed to initialize transaction decompressor"), + )); + + /// Thread receipt compressor. + pub static RECEIPT_COMPRESSOR: RefCell> = RefCell::new( + Compressor::with_dictionary(0, RECEIPT_DICTIONARY) + .expect("failed to initialize receipt compressor"), + ); + + /// Thread receipt decompressor. + pub static RECEIPT_DECOMPRESSOR: RefCell = + RefCell::new(ReusableDecompressor::new( + Decompressor::with_dictionary(RECEIPT_DICTIONARY) + .expect("failed to initialize receipt decompressor"), + )); + } } /// Fn creates tx [`Compressor`] diff --git a/crates/primitives/src/compression/transaction_dictionary.bin b/crates/storage/zstd-compressors/transaction_dictionary.bin similarity index 100% rename from crates/primitives/src/compression/transaction_dictionary.bin rename to crates/storage/zstd-compressors/transaction_dictionary.bin From 980e62a5b8c047ec54d2872079481fff42a7c9db Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 04:17:34 +0400 Subject: [PATCH 42/70] chore: relax `ProviderFactory` setup (#13254) --- crates/cli/commands/src/common.rs | 6 ++--- .../cli/commands/src/stage/dump/execution.rs | 11 +++------ crates/cli/commands/src/stage/dump/merkle.rs | 2 +- crates/cli/commands/src/stage/unwind.rs | 2 +- crates/engine/local/src/service.rs | 4 ++-- crates/engine/service/src/service.rs | 4 ++-- crates/engine/tree/src/persistence.rs | 21 +++++++--------- crates/evm/src/noop.rs | 24 +++++++++---------- crates/net/downloaders/src/bodies/noop.rs | 13 +++++----- crates/net/downloaders/src/headers/noop.rs | 14 +++++------ crates/node/builder/src/launch/common.rs | 20 
++++------------ crates/node/builder/src/launch/engine.rs | 15 ++++-------- crates/node/builder/src/launch/exex.rs | 10 ++++---- 13 files changed, 61 insertions(+), 85 deletions(-) diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 174eeffa396e..e206715fc01d 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -2,10 +2,10 @@ use alloy_primitives::B256; use clap::Parser; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::noop::NoopConsensus; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -151,10 +151,10 @@ impl EnvironmentArgs { .add_stages(DefaultStages::new( factory.clone(), tip_rx, - Arc::new(EthBeaconConsensus::new(self.chain.clone())), + Arc::new(NoopConsensus::default()), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), - NoopBlockExecutorProvider::default(), + NoopBlockExecutorProvider::::default(), config.stages.clone(), prune_modes.clone(), )) diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 73d2e8a9f8f0..1460c6bb6f67 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -140,13 +140,7 @@ fn import_tables_with_range( /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. 
fn unwind_and_copy< - N: ProviderNodeTypes< - Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, - >, - >, + N: ProviderNodeTypes>, >( db_tool: &DbTool, from: u64, @@ -155,7 +149,8 @@ fn unwind_and_copy< ) -> eyre::Result<()> { let provider = db_tool.provider_factory.database_provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_executor(NoopBlockExecutorProvider::default()); + let mut exec_stage = + ExecutionStage::new_with_executor(NoopBlockExecutorProvider::::default()); exec_stage.unwind( &provider, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index 59a25c492aa8..f0dbb1a1fafb 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -112,7 +112,7 @@ fn unwind_and_copy< // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - NoopBlockExecutorProvider::default(), // Not necessary for unwinding. + NoopBlockExecutorProvider::::default(), // Not necessary for unwinding. 
ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index de535d65508a..cc5d719d2708 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -120,7 +120,7 @@ impl> Command let (tip_tx, tip_rx) = watch::channel(B256::ZERO); // Unwinding does not require a valid executor - let executor = NoopBlockExecutorProvider::default(); + let executor = NoopBlockExecutorProvider::::default(); let builder = if self.offline { Pipeline::::builder().add_stages( diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 6ce588a8264b..0bdc77dbe4b1 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -27,7 +27,7 @@ use reth_engine_tree::{ EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineRequestHandler, FromEngine, RequestHandlerEvent, }, - persistence::{PersistenceHandle, PersistenceNodeTypes}, + persistence::PersistenceHandle, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; @@ -59,7 +59,7 @@ where impl LocalEngineService where - N: EngineNodeTypes + PersistenceNodeTypes, + N: EngineNodeTypes, { /// Constructor for [`LocalEngineService`]. 
#[allow(clippy::too_many_arguments)] diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index d839fab2c0e2..27de4a63605d 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -8,7 +8,7 @@ use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, - persistence::{PersistenceHandle, PersistenceNodeTypes}, + persistence::PersistenceHandle, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ @@ -59,7 +59,7 @@ where impl EngineService where - N: EngineNodeTypes + PersistenceNodeTypes, + N: EngineNodeTypes, Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 2f0b20f02dca..c7ad41100866 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,4 +1,5 @@ use crate::metrics::PersistenceMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; @@ -17,11 +18,6 @@ use thiserror::Error; use tokio::sync::oneshot; use tracing::{debug, error}; -/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within -/// [`PersistenceService`]. -pub trait PersistenceNodeTypes: ProviderNodeTypes {} -impl PersistenceNodeTypes for T where T: ProviderNodeTypes {} - /// Writes parts of reth's in memory tree state to the database and static files. 
/// /// This is meant to be a spawned service that listens for various incoming persistence operations, @@ -32,7 +28,7 @@ impl PersistenceNodeTypes for T where T: ProviderNodeTypes where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { /// The provider factory to use provider: ProviderFactory, @@ -48,7 +44,7 @@ where impl PersistenceService where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { /// Create a new persistence service pub fn new( @@ -74,7 +70,7 @@ where impl PersistenceService where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { /// This is the main loop, that will listen to database events and perform the requested /// database actions @@ -148,9 +144,10 @@ where ) -> Result, PersistenceError> { debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); - let last_block_hash_num = blocks - .last() - .map(|block| BlockNumHash { hash: block.block().hash(), number: block.block().number }); + let last_block_hash_num = blocks.last().map(|block| BlockNumHash { + hash: block.block().hash(), + number: block.block().header().number(), + }); if last_block_hash_num.is_some() { let provider_rw = self.provider.database_provider_rw()?; @@ -219,7 +216,7 @@ impl PersistenceHandle { sync_metrics_tx: MetricEventsSender, ) -> PersistenceHandle where - N: PersistenceNodeTypes, + N: ProviderNodeTypes, { // create the initial channels let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 7b1063533da3..816a4c835644 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, EthPrimitives, 
Receipt}; +use reth_primitives::{BlockWithSenders, NodePrimitives}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -20,10 +20,10 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; /// A [`BlockExecutorProvider`] implementation that does nothing. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct NoopBlockExecutorProvider; +pub struct NoopBlockExecutorProvider

(core::marker::PhantomData

); -impl BlockExecutorProvider for NoopBlockExecutorProvider { - type Primitives = EthPrimitives; +impl BlockExecutorProvider for NoopBlockExecutorProvider

{ + type Primitives = P; type Executor + Display>> = Self; @@ -33,20 +33,20 @@ impl BlockExecutorProvider for NoopBlockExecutorProvider { where DB: Database + Display>, { - Self + Self::default() } fn batch_executor(&self, _: DB) -> Self::BatchExecutor where DB: Database + Display>, { - Self + Self::default() } } -impl Executor for NoopBlockExecutorProvider { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; +impl Executor for NoopBlockExecutorProvider

{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; fn execute(self, _: Self::Input<'_>) -> Result { @@ -76,9 +76,9 @@ impl Executor for NoopBlockExecutorProvider { } } -impl BatchExecutor for NoopBlockExecutorProvider { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; +impl BatchExecutor for NoopBlockExecutorProvider

{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = ExecutionOutcome; type Error = BlockExecutionError; fn execute_and_verify_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index f311a242c20d..dd3e6e9691b9 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -4,24 +4,23 @@ use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; -use reth_primitives::BlockBody; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// A [`BodyDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopBodiesDownloader; +pub struct NoopBodiesDownloader(std::marker::PhantomData); -impl BodyDownloader for NoopBodiesDownloader { - type Body = BlockBody; +impl BodyDownloader for NoopBodiesDownloader { + type Body = B; fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } -impl Stream for NoopBodiesDownloader { - type Item = Result>, DownloadError>; +impl Stream for NoopBodiesDownloader { + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 58da73123878..e9dee56dd2e1 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -1,28 +1,28 @@ -use alloy_consensus::Header; use futures::Stream; use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, error::HeadersDownloaderError, }; use reth_primitives::SealedHeader; +use std::fmt::Debug; /// A [`HeaderDownloader`] implementation that does nothing. 
#[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopHeaderDownloader; +pub struct NoopHeaderDownloader(std::marker::PhantomData); -impl HeaderDownloader for NoopHeaderDownloader { - type Header = Header; +impl HeaderDownloader for NoopHeaderDownloader { + type Header = H; - fn update_local_head(&mut self, _: SealedHeader) {} + fn update_local_head(&mut self, _: SealedHeader) {} fn update_sync_target(&mut self, _: SyncTarget) {} fn set_batch_size(&mut self, _: usize) {} } -impl Stream for NoopHeaderDownloader { - type Item = Result, HeadersDownloaderError

>; +impl Stream for NoopHeaderDownloader { + type Item = Result>, HeadersDownloaderError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 104ecef9e809..62226cb0b1cb 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -10,9 +10,9 @@ use crate::{ use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; +use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitStorageError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -383,12 +383,7 @@ where pub async fn create_provider_factory(&self) -> eyre::Result> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, - >, + N::Primitives: FullNodePrimitives, { let factory = ProviderFactory::new( self.right().clone(), @@ -420,10 +415,10 @@ where .add_stages(DefaultStages::new( factory.clone(), tip_rx, - Arc::new(EthBeaconConsensus::new(self.chain_spec())), + Arc::new(NoopConsensus::default()), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), - NoopBlockExecutorProvider::default(), + NoopBlockExecutorProvider::::default(), self.toml_config().stages.clone(), self.prune_modes(), )) @@ -455,12 +450,7 @@ where ) -> eyre::Result, ProviderFactory>>> where N: ProviderNodeTypes, - N::Primitives: FullNodePrimitives< - Block = reth_primitives::Block, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - BlockHeader = 
reth_primitives::Header, - >, + N::Primitives: FullNodePrimitives, { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 264de07048a0..054def94e50d 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -3,7 +3,7 @@ use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, - BeaconConsensusEngineHandle, + BeaconConsensusEngineHandle, EngineNodeTypes, }; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; @@ -11,7 +11,6 @@ use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, - persistence::PersistenceNodeTypes, tree::TreeConfig, }; use reth_engine_util::EngineMessageStreamExt; @@ -28,8 +27,8 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::{EthPrimitives, EthereumHardforks}; -use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; +use reth_primitives::EthereumHardforks; +use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -70,17 +69,13 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: - ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, + Types: EngineNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns> + EngineValidatorAddOn< NodeAdapter, - Validator: EngineValidator< - ::Engine, - Block = BlockTy, - >, + Validator: EngineValidator>, >, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< diff --git 
a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index 0eef0d005763..0235dd929e2a 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -10,7 +10,7 @@ use reth_exex::{ DEFAULT_EXEX_MANAGER_CAPACITY, }; use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::{EthPrimitives, Head}; +use reth_primitives::Head; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use tracing::Instrument; @@ -25,9 +25,7 @@ pub struct ExExLauncher { config_container: WithConfigs<::ChainSpec>, } -impl> + Clone> - ExExLauncher -{ +impl ExExLauncher { /// Create a new `ExExLauncher` with the given extensions. pub const fn new( head: Head, @@ -42,7 +40,9 @@ impl> + Cl /// /// Spawns all extensions and returns the handle to the exex manager if any extensions are /// installed. - pub async fn launch(self) -> eyre::Result> { + pub async fn launch( + self, + ) -> eyre::Result::Primitives>>> { let Self { head, extensions, components, config_container } = self; if extensions.is_empty() { From c9bd64018a0efddeed79911e5af8d28748f19e3a Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 19:21:46 -0500 Subject: [PATCH 43/70] chore: use `BlockWithParent` for `StageError` (#13198) --- Cargo.lock | 1 + .../beacon/src/engine/invalid_headers.rs | 41 ++++++++++--------- crates/consensus/beacon/src/engine/mod.rs | 30 +++++++------- crates/engine/tree/src/tree/mod.rs | 18 ++++---- crates/primitives-traits/src/header/sealed.rs | 7 +++- crates/stages/api/Cargo.toml | 2 + crates/stages/api/src/error.rs | 22 +++++----- crates/stages/api/src/pipeline/ctrl.rs | 4 +- crates/stages/api/src/pipeline/mod.rs | 16 ++++---- crates/stages/stages/src/stages/execution.rs | 15 +++++-- crates/stages/stages/src/stages/headers.rs | 12 +++++- crates/stages/stages/src/stages/merkle.rs | 2 +- .../stages/src/stages/sender_recovery.rs | 2 +- 
crates/stages/stages/src/stages/utils.rs | 5 ++- testing/testing-utils/src/generators.rs | 15 ++++++- 15 files changed, 117 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3de11a253bd0..08259a54aba2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9293,6 +9293,7 @@ dependencies = [ name = "reth-stages-api" version = "1.1.2" dependencies = [ + "alloy-eips", "alloy-primitives", "aquamarine", "assert_matches", diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 0a72129a6274..2e2bc37a27ee 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,12 +1,11 @@ -use alloy_consensus::Header; +use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::B256; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; -use reth_primitives::SealedHeader; use schnellru::{ByLength, LruMap}; -use std::{fmt::Debug, sync::Arc}; +use std::fmt::Debug; use tracing::warn; /// The max hit counter for invalid headers in the cache before it is forcefully evicted. @@ -17,20 +16,20 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. #[derive(Debug)] -pub struct InvalidHeaderCache { +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap>, + headers: LruMap, /// Metrics for the cache. metrics: InvalidHeaderCacheMetrics, } -impl InvalidHeaderCache { +impl InvalidHeaderCache { /// Invalid header cache constructor. 
pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } - fn insert_entry(&mut self, hash: B256, header: Arc) { + fn insert_entry(&mut self, hash: B256, header: BlockWithParent) { self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); } @@ -38,7 +37,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option> { + pub fn get(&mut self, hash: &B256) -> Option { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -53,7 +52,11 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub fn insert_with_invalid_ancestor(&mut self, header_hash: B256, invalid_ancestor: Arc) { + pub fn insert_with_invalid_ancestor( + &mut self, + header_hash: B256, + invalid_ancestor: BlockWithParent, + ) { if self.get(&header_hash).is_none() { warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); self.insert_entry(header_hash, invalid_ancestor); @@ -65,12 +68,10 @@ impl InvalidHeaderCache { } /// Inserts an invalid ancestor into the map. 
- pub fn insert(&mut self, invalid_ancestor: SealedHeader) { - if self.get(&invalid_ancestor.hash()).is_none() { - let hash = invalid_ancestor.hash(); - let header = invalid_ancestor.unseal(); - warn!(target: "consensus::engine", ?hash, ?header, "Bad block with hash"); - self.insert_entry(hash, Arc::new(header)); + pub fn insert(&mut self, invalid_ancestor: BlockWithParent) { + if self.get(&invalid_ancestor.block.hash).is_none() { + warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash"); + self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor); // update metrics self.metrics.unique_inserts.increment(1); @@ -79,11 +80,11 @@ impl InvalidHeaderCache { } } -struct HeaderEntry { +struct HeaderEntry { /// Keeps track how many times this header has been hit. hit_count: u8, - /// The actually header entry - header: Arc, + /// The actual header entry + header: BlockWithParent, } /// Metrics for the invalid headers cache. @@ -103,13 +104,15 @@ struct InvalidHeaderCacheMetrics { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; + use reth_primitives::SealedHeader; #[test] fn test_hit_eviction() { let mut cache = InvalidHeaderCache::new(10); let header = Header::default(); let header = SealedHeader::seal(header); - cache.insert(header.clone()); + cache.insert(header.block_with_parent()); assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); for hit in 1..INVALID_HEADER_HIT_EVICTION_THRESHOLD { diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index f188e495be4e..c41f9283db85 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -760,14 +760,14 @@ where // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor let mut current_hash = parent_hash; - let mut current_header = self.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = 
header.parent_hash; - current_header = self.invalid_headers.get(¤t_hash); + let mut current_block = self.invalid_headers.get(¤t_hash); + while let Some(block) = current_block { + current_hash = block.parent; + current_block = self.invalid_headers.get(¤t_hash); // If current_header is None, then the current_hash does not have an invalid // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && + if current_block.is_none() && self.blockchain.find_block_by_hash(current_hash, BlockSource::Any)?.is_some() { return Ok(Some(current_hash)) @@ -806,13 +806,13 @@ where head: B256, ) -> ProviderResult> { // check if the check hash was previously marked as invalid - let Some(header) = self.invalid_headers.get(&check) else { return Ok(None) }; + let Some(block) = self.invalid_headers.get(&check) else { return Ok(None) }; // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; + let status = self.prepare_invalid_response(block.parent)?; // insert the head block into the invalid header cache - self.invalid_headers.insert_with_invalid_ancestor(head, header); + self.invalid_headers.insert_with_invalid_ancestor(head, block); Ok(Some(status)) } @@ -821,10 +821,10 @@ where /// to a forkchoice update. fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { // check if the head was previously marked as invalid - let Some(header) = self.invalid_headers.get(&head) else { return Ok(None) }; + let Some(block) = self.invalid_headers.get(&head) else { return Ok(None) }; // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + Ok(Some(self.prepare_invalid_response(block.parent)?)) } /// Record latency metrics for one call to make a block canonical @@ -1454,7 +1454,7 @@ where fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. 
if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + warn!(target: "consensus::engine", invalid_num_hash=?bad_block.block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.invalid_headers.insert(*bad_block); return Ok(()) @@ -1673,7 +1673,7 @@ where self.latest_valid_hash_for_invalid_payload(block.parent_hash)? }; // keep track of the invalid header - self.invalid_headers.insert(block.header); + self.invalid_headers.insert(block.header.block_with_parent()); PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: error.to_string() }, latest_valid_hash, @@ -1782,7 +1782,7 @@ where let (block, err) = err.split(); warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - self.invalid_headers.insert(block.header); + self.invalid_headers.insert(block.header.block_with_parent()); } } } @@ -2035,7 +2035,7 @@ mod tests { .await; assert_matches!( res.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) + Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) ); } @@ -2141,7 +2141,7 @@ mod tests { assert_matches!( rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) + Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) ); } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 5fc07abf7a2f..ce9bddd90c19 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1328,7 +1328,7 @@ where // Pipeline unwound, memorize the invalid block and wait 
for CL for next sync target. if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "engine::tree", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + warn!(target: "engine::tree", invalid_block=?bad_block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.state.invalid_headers.insert(*bad_block); return Ok(()) @@ -1678,14 +1678,14 @@ where // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor let mut current_hash = parent_hash; - let mut current_header = self.state.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = header.parent_hash; - current_header = self.state.invalid_headers.get(¤t_hash); + let mut current_block = self.state.invalid_headers.get(¤t_hash); + while let Some(block_with_parent) = current_block { + current_hash = block_with_parent.parent; + current_block = self.state.invalid_headers.get(¤t_hash); // If current_header is None, then the current_hash does not have an invalid // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { + if current_block.is_none() && self.block_by_hash(current_hash)?.is_some() { return Ok(Some(current_hash)) } } @@ -1735,7 +1735,7 @@ where let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; + let status = self.prepare_invalid_response(header.parent)?; // insert the head block into the invalid header cache self.state.invalid_headers.insert_with_invalid_ancestor(head, header); @@ -1749,7 +1749,7 @@ where // check if the head was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; // populate the latest valid hash field - 
Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + Ok(Some(self.prepare_invalid_response(header.parent)?)) } /// Validate if block is correct and satisfies all the consensus rules that concern the header @@ -2395,7 +2395,7 @@ where }; // keep track of the invalid header - self.state.invalid_headers.insert(block.header); + self.state.invalid_headers.insert(block.header.block_with_parent()); Ok(PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: validation_err.to_string() }, latest_valid_hash, diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index e99b0e1c17ff..61b021a0879b 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,7 +1,7 @@ use crate::InMemorySize; pub use alloy_consensus::Header; use alloy_consensus::Sealed; -use alloy_eips::BlockNumHash; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use alloy_primitives::{keccak256, BlockHash, Sealable}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; @@ -65,6 +65,11 @@ impl SealedHeader { pub fn num_hash(&self) -> BlockNumHash { BlockNumHash::new(self.number(), self.hash) } + + /// Return a [`BlockWithParent`] for this header. 
+ pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } + } } impl InMemorySize for SealedHeader { diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 88a8e3b96d13..ffa34afa71e7 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -23,7 +23,9 @@ reth-errors.workspace = true reth-stages-types.workspace = true reth-static-file-types.workspace = true +# alloy alloy-primitives.workspace = true +alloy-eips.workspace = true # metrics reth-metrics.workspace = true diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index 9a4ef35aaf25..b63dd20f77c1 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -1,8 +1,8 @@ use crate::PipelineEvent; +use alloy_eips::eip1898::BlockWithParent; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, DatabaseError, RethError}; use reth_network_p2p::error::DownloadError; -use reth_primitives_traits::SealedHeader; use reth_provider::ProviderError; use reth_prune::{PruneSegment, PruneSegmentError, PrunerError}; use reth_static_file_types::StaticFileSegment; @@ -34,10 +34,10 @@ impl BlockErrorKind { #[derive(Error, Debug)] pub enum StageError { /// The stage encountered an error related to a block. - #[error("stage encountered an error in block #{number}: {error}", number = block.number)] + #[error("stage encountered an error in block #{number}: {error}", number = block.block.number)] Block { /// The block that caused the error. - block: Box, + block: Box, /// The specific error type, either consensus or execution error. 
#[source] error: BlockErrorKind, @@ -48,16 +48,16 @@ pub enum StageError { "stage encountered inconsistent chain: \ downloaded header #{header_number} ({header_hash}) is detached from \ local head #{head_number} ({head_hash}): {error}", - header_number = header.number, - header_hash = header.hash(), - head_number = local_head.number, - head_hash = local_head.hash(), + header_number = header.block.number, + header_hash = header.block.hash, + head_number = local_head.block.number, + head_hash = local_head.block.hash, )] DetachedHead { /// The local head we attempted to attach to. - local_head: Box, + local_head: Box, /// The header we attempted to attach. - header: Box, + header: Box, /// The error that occurred when attempting to attach the header. #[source] error: Box, @@ -92,10 +92,10 @@ pub enum StageError { #[error("invalid download response: {0}")] Download(#[from] DownloadError), /// Database is ahead of static file data. - #[error("missing static file data for block number: {number}", number = block.number)] + #[error("missing static file data for block number: {number}", number = block.block.number)] MissingStaticFileData { /// Starting block with missing data. - block: Box, + block: Box, /// Static File segment segment: StaticFileSegment, }, diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 161857552451..378385e97b73 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -1,5 +1,5 @@ +use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::BlockNumber; -use reth_primitives_traits::SealedHeader; /// Determines the control flow during pipeline execution. /// @@ -11,7 +11,7 @@ pub enum ControlFlow { /// The block to unwind to. target: BlockNumber, /// The block that caused the unwind. - bad_block: Box, + bad_block: Box, }, /// The pipeline made progress. 
Continue { diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index ec57de8d11c9..2cb98d44f93d 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -223,7 +223,7 @@ impl Pipeline { } ControlFlow::Continue { block_number } => self.progress.update(block_number), ControlFlow::Unwind { target, bad_block } => { - self.unwind(target, Some(bad_block.number))?; + self.unwind(target, Some(bad_block.block.number))?; return Ok(ControlFlow::Unwind { target, bad_block }) } } @@ -505,7 +505,7 @@ fn on_stage_error( // We unwind because of a detached head. let unwind_to = - local_head.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); + local_head.block.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head })) } else if let StageError::Block { block, error } = err { match error { @@ -513,7 +513,7 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, "Stage encountered a validation error: {validation_error}" ); @@ -542,7 +542,7 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, "Stage encountered an execution error: {execution_error}" ); @@ -560,12 +560,12 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, segment = %segment, "Stage is missing static file data." 
); - Ok(Some(ControlFlow::Unwind { target: block.number - 1, bad_block: block })) + Ok(Some(ControlFlow::Unwind { target: block.block.number - 1, bad_block: block })) } else if err.is_fatal() { error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}"); Err(err.into()) @@ -603,7 +603,7 @@ mod tests { use reth_errors::ProviderError; use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB}; use reth_prune::PruneModes; - use reth_testing_utils::{generators, generators::random_header}; + use reth_testing_utils::generators::{self, random_block_with_parent}; use tokio_stream::StreamExt; #[test] @@ -975,7 +975,7 @@ mod tests { .add_stage( TestStage::new(StageId::Other("B")) .add_exec(Err(StageError::Block { - block: Box::new(random_header( + block: Box::new(random_block_with_parent( &mut generators::rng(), 5, Default::default(), diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index c8cc89080867..91afc33efaa0 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,6 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; @@ -11,7 +12,7 @@ use reth_evm::{ }; use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; -use reth_primitives::{SealedHeader, StaticFileSegment}; +use reth_primitives::StaticFileSegment; use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, @@ -359,9 +360,15 @@ where let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { - executor.execute_and_verify_one(input).map_err(|error| 
StageError::Block { - block: Box::new(SealedHeader::seal(block.header().clone())), - error: BlockErrorKind::Execution(error), + executor.execute_and_verify_one(input).map_err(|error| { + let header = block.header(); + StageError::Block { + block: Box::new(BlockWithParent::new( + header.parent_hash(), + NumHash::new(header.number(), header.hash_slow()), + )), + error: BlockErrorKind::Execution(error), + } }) })?; diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 7b9b394b5615..7ca9cae590b6 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,4 +1,5 @@ use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; @@ -143,7 +144,10 @@ where // Header validation self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { StageError::Block { - block: Box::new(SealedHeader::new(header.clone(), header_hash)), + block: Box::new(BlockWithParent::new( + header.parent_hash, + NumHash::new(header.number, header_hash), + )), error: BlockErrorKind::Validation(error), } })?; @@ -272,7 +276,11 @@ where } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { error!(target: "sync::stages::headers", %error, "Cannot attach header to head"); - return Poll::Ready(Err(StageError::DetachedHead { local_head, header, error })) + return Poll::Ready(Err(StageError::DetachedHead { + local_head: Box::new(local_head.block_with_parent()), + header: Box::new(header.block_with_parent()), + error, + })) } None => return Poll::Ready(Err(StageError::ChannelClosed)), } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 8095dfed9048..ff4d37cf3f61 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ 
b/crates/stages/stages/src/stages/merkle.rs @@ -357,7 +357,7 @@ fn validate_state_root( error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), )), - block: Box::new(expected), + block: Box::new(expected.block_with_parent()), }) } } diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index d34a4b07921a..b5506068f481 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -192,7 +192,7 @@ where })?; Err(StageError::Block { - block: Box::new(sealed_header), + block: Box::new(sealed_header.block_with_parent()), error: BlockErrorKind::Validation( ConsensusError::TransactionSignerRecoveryError, ), diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 34aaeee44beb..c2a7c6ede02f 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -279,5 +279,8 @@ where let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - Ok(StageError::MissingStaticFileData { block: missing_block, segment }) + Ok(StageError::MissingStaticFileData { + block: Box::new(missing_block.block_with_parent()), + segment, + }) } diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 9963b447e96d..28ba171bdb37 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,7 +1,11 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. 
use alloy_consensus::{Header, Transaction as _, TxLegacy}; -use alloy_eips::eip4895::{Withdrawal, Withdrawals}; +use alloy_eips::{ + eip1898::BlockWithParent, + eip4895::{Withdrawal, Withdrawals}, + NumHash, +}; use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256}; pub use rand::Rng; use rand::{ @@ -95,6 +99,15 @@ pub fn random_header_range( headers } +/// Generate a random [`BlockWithParent`]. +pub fn random_block_with_parent( + rng: &mut R, + number: u64, + parent: Option, +) -> BlockWithParent { + BlockWithParent { parent: parent.unwrap_or_default(), block: NumHash::new(number, rng.gen()) } +} + /// Generate a random [`SealedHeader`]. /// /// The header is assumed to not be correct if validated. From 5ee776a2ee9fbecc35558f5dc0d31f8459af8658 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 06:04:11 +0400 Subject: [PATCH 44/70] chore: pass primiitves generic to `EngineApiTreeHandler` fields (#13256) --- crates/blockchain-tree/Cargo.toml | 1 + crates/blockchain-tree/src/block_buffer.rs | 34 +++++---- crates/engine/local/src/service.rs | 2 +- crates/engine/service/src/service.rs | 6 +- crates/engine/tree/src/engine.rs | 14 ++-- crates/engine/tree/src/tree/mod.rs | 88 +++++++++++----------- 6 files changed, 76 insertions(+), 69 deletions(-) diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index 3fa6de2b402c..07ecedf882f2 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -31,6 +31,7 @@ reth-consensus.workspace = true reth-node-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 5d4ca2705cb1..994ed82cfb94 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -1,6 +1,8 @@ use crate::metrics::BlockBufferMetrics; +use 
alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_network::cache::LruCache; +use reth_node_types::Block; use reth_primitives::SealedBlockWithSenders; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -16,9 +18,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block /// is done by last recently used block. #[derive(Debug)] -pub struct BlockBuffer { +pub struct BlockBuffer { /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap, + pub(crate) blocks: HashMap>, /// Map of any parent block hash (even the ones not currently in the buffer) /// to the buffered children. /// Allows connecting buffered blocks by parent. @@ -35,7 +37,7 @@ pub struct BlockBuffer { pub(crate) metrics: BlockBufferMetrics, } -impl BlockBuffer { +impl BlockBuffer { /// Create new buffer with max limit of blocks pub fn new(limit: u32) -> Self { Self { @@ -48,37 +50,37 @@ impl BlockBuffer { } /// Return reference to buffered blocks - pub const fn blocks(&self) -> &HashMap { + pub const fn blocks(&self) -> &HashMap> { &self.blocks } /// Return reference to the requested block. - pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { self.blocks.get(hash) } /// Return a reference to the lowest ancestor of the given block in the buffer. - pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { let mut current_block = self.blocks.get(hash)?; - while let Some(parent) = self.blocks.get(¤t_block.parent_hash) { + while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { current_block = parent; } Some(current_block) } /// Insert a correct block inside the buffer. 
- pub fn insert_block(&mut self, block: SealedBlockWithSenders) { + pub fn insert_block(&mut self, block: SealedBlockWithSenders) { let hash = block.hash(); - self.parent_to_child.entry(block.parent_hash).or_default().insert(hash); - self.earliest_blocks.entry(block.number).or_default().insert(hash); + self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); + self.earliest_blocks.entry(block.number()).or_default().insert(hash); self.blocks.insert(hash, block); if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) { // evict the block if limit is hit if let Some(evicted_block) = self.remove_block(&evicted_hash) { // evict the block if limit is hit - self.remove_from_parent(evicted_block.parent_hash, &evicted_hash); + self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash); } } self.metrics.blocks.set(self.blocks.len() as f64); @@ -93,7 +95,7 @@ impl BlockBuffer { pub fn remove_block_with_children( &mut self, parent_hash: &BlockHash, - ) -> Vec { + ) -> Vec> { let removed = self .remove_block(parent_hash) .into_iter() @@ -152,16 +154,16 @@ impl BlockBuffer { /// This method will only remove the block if it's present inside `self.blocks`. /// The block might be missing from other collections, the method will only ensure that it has /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option { + fn remove_block(&mut self, hash: &BlockHash) -> Option> { let block = self.blocks.remove(hash)?; - self.remove_from_earliest_blocks(block.number, hash); - self.remove_from_parent(block.parent_hash, hash); + self.remove_from_earliest_blocks(block.number(), hash); + self.remove_from_parent(block.parent_hash(), hash); self.lru.remove(hash); Some(block) } /// Remove all children and their descendants for the given blocks and return them. 
- fn remove_children(&mut self, parent_hashes: Vec) -> Vec { + fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { // remove all parent child connection and all the child children blocks that are connected // to the discarded parent blocks. let mut remove_parent_children = parent_hashes; diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 0bdc77dbe4b1..b4dd47c43abe 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -52,7 +52,7 @@ where /// Processes requests. /// /// This type is responsible for processing incoming requests. - handler: EngineApiRequestHandler>, + handler: EngineApiRequestHandler>, /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. incoming_requests: EngineMessageStream, } diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 27de4a63605d..c6a87ea076f7 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -17,7 +17,7 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::EthBlockClient; -use reth_node_types::{BlockTy, NodeTypesWithEngine}; +use reth_node_types::{BlockTy, NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; @@ -37,7 +37,9 @@ pub type EngineMessageStream = Pin = ChainOrchestrator< EngineHandler< - EngineApiRequestHandler::Engine>>, + EngineApiRequestHandler< + EngineApiRequest<::Engine, ::Primitives>, + >, EngineMessageStream<::Engine>, BasicBlockDownloader, >, diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 947d025e9ab6..2f0415a1013f 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -238,14 +238,14 @@ impl EngineApiKind { /// The request variants that the engine API 
handler can receive. #[derive(Debug)] -pub enum EngineApiRequest { +pub enum EngineApiRequest { /// A request received from the consensus engine. Beacon(BeaconEngineMessage), /// Request to insert an already executed block, e.g. via payload building. - InsertExecutedBlock(ExecutedBlock), + InsertExecutedBlock(ExecutedBlock), } -impl Display for EngineApiRequest { +impl Display for EngineApiRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Beacon(msg) => msg.fmt(f), @@ -256,14 +256,16 @@ impl Display for EngineApiRequest { } } -impl From> for EngineApiRequest { +impl From> for EngineApiRequest { fn from(msg: BeaconEngineMessage) -> Self { Self::Beacon(msg) } } -impl From> for FromEngine> { - fn from(req: EngineApiRequest) -> Self { +impl From> + for FromEngine> +{ + fn from(req: EngineApiRequest) -> Self { Self::Request(req) } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index ce9bddd90c19..29c382c28856 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -36,8 +36,8 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, - SealedHeader, + EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, @@ -53,7 +53,6 @@ use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, fmt::Debug, - marker::PhantomData, ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -129,7 +128,7 @@ impl TreeState { } /// Returns the block by hash. 
- fn block_by_hash(&self, hash: B256) -> Option>> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -386,19 +385,19 @@ impl TreeState { /// /// This type is not shareable. #[derive(Debug)] -pub struct EngineApiTreeState { +pub struct EngineApiTreeState { /// Tracks the state of the blockchain tree. - tree_state: TreeState, + tree_state: TreeState, /// Tracks the forkchoice state updates received by the CL. forkchoice_state_tracker: ForkchoiceStateTracker, /// Buffer of detached blocks. - buffer: BlockBuffer, + buffer: BlockBuffer, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, } -impl EngineApiTreeState { +impl EngineApiTreeState { fn new( block_buffer_limit: u32, max_invalid_header_cache_length: u32, @@ -474,10 +473,10 @@ where { provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc>, payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. - state: EngineApiTreeState, + state: EngineApiTreeState, /// The half for sending messages to the engine. /// /// This is kept so that we can queue in messages to ourself that we can process later, for @@ -486,20 +485,20 @@ where /// them one by one so that we can handle incoming engine API in between and don't become /// unresponsive. This can happen during live sync transition where we're trying to close the /// gap (up to 3 epochs of blocks in the worst case). - incoming_tx: Sender>>, + incoming_tx: Sender>>, /// Incoming engine API requests. - incoming: Receiver>>, + incoming: Receiver>>, /// Outgoing events that are emitted to the handler. - outgoing: UnboundedSender, + outgoing: UnboundedSender>, /// Channels to the persistence layer. - persistence: PersistenceHandle, + persistence: PersistenceHandle, /// Tracks the state changes of the persistence task. 
persistence_state: PersistenceState, /// Flag indicating the state of the node's backfill synchronization process. backfill_sync_state: BackfillSyncState, /// Keeps track of the state of the canonical chain that isn't persisted yet. /// This is intended to be accessed from external sources, such as rpc. - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, /// Handle to the payload builder that will receive payload attributes for valid forkchoice /// updates payload_builder: PayloadBuilderHandle, @@ -511,8 +510,6 @@ where invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, - /// Captures the types the engine operates on - _primtives: PhantomData, } impl std::fmt::Debug @@ -546,6 +543,7 @@ where N: NodePrimitives< Block = reth_primitives::Block, BlockHeader = reth_primitives::Header, + BlockBody = reth_primitives::BlockBody, Receipt = reth_primitives::Receipt, >, P: DatabaseProviderFactory @@ -562,16 +560,16 @@ where V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. 
- #[allow(clippy::too_many_arguments)] + #[expect(clippy::too_many_arguments)] pub fn new( provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc>, payload_validator: V, - outgoing: UnboundedSender, - state: EngineApiTreeState, - canonical_in_memory_state: CanonicalInMemoryState, - persistence: PersistenceHandle, + outgoing: UnboundedSender>, + state: EngineApiTreeState, + canonical_in_memory_state: CanonicalInMemoryState, + persistence: PersistenceHandle, persistence_state: PersistenceState, payload_builder: PayloadBuilderHandle, config: TreeConfig, @@ -597,7 +595,6 @@ where incoming_tx, invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, - _primtives: Default::default(), } } @@ -611,19 +608,19 @@ where /// /// Returns the sender through which incoming requests can be sent to the task and the receiver /// end of a [`EngineApiEvent`] unbounded channel to receive events from the engine. - #[allow(clippy::too_many_arguments)] + #[expect(clippy::complexity)] pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc, + consensus: Arc>, payload_validator: V, - persistence: PersistenceHandle, + persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, invalid_block_hook: Box>, kind: EngineApiKind, - ) -> (Sender>>, UnboundedReceiver) { + ) -> (Sender>>, UnboundedReceiver>) { let best_block_number = provider.best_block_number().unwrap_or(0); let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default(); @@ -661,7 +658,7 @@ where } /// Returns a new [`Sender`] to send messages to this type. - pub fn sender(&self) -> Sender>> { + pub fn sender(&self) -> Sender>> { self.incoming_tx.clone() } @@ -859,7 +856,7 @@ where /// /// Note: This does not update the tracked state and instead returns the new chain based on the /// given head. 
- fn on_new_head(&self, new_head: B256) -> ProviderResult> { + fn on_new_head(&self, new_head: B256) -> ProviderResult>> { // get the executed new head block let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else { return Ok(None) @@ -1133,7 +1130,7 @@ where /// Returns an error if the engine channel is disconnected. fn try_recv_engine_message( &self, - ) -> Result>>, RecvError> { + ) -> Result>>, RecvError> { if self.persistence_state.in_progress() { // try to receive the next request with a timeout to not block indefinitely match self.incoming.recv_timeout(std::time::Duration::from_millis(500)) { @@ -1211,7 +1208,7 @@ where /// Handles a message from the engine. fn on_engine_message( &mut self, - msg: FromEngine>, + msg: FromEngine>, ) -> Result<(), InsertBlockFatalError> { match msg { FromEngine::Event(event) => match event { @@ -1452,7 +1449,7 @@ where } /// Emits an outgoing event to the engine. - fn emit_event(&mut self, event: impl Into) { + fn emit_event(&mut self, event: impl Into>) { let event = event.into(); if event.is_backfill_action() { @@ -1496,7 +1493,7 @@ where /// Returns a batch of consecutive canonical blocks to persist in the range /// `(last_persisted_number .. canonical_head - threshold]` . The expected /// order is oldest -> newest. - fn get_canonical_blocks_to_persist(&self) -> Vec { + fn get_canonical_blocks_to_persist(&self) -> Vec> { let mut blocks_to_persist = Vec::new(); let mut current_hash = self.state.tree_state.canonical_block_hash(); let last_persisted_number = self.persistence_state.last_persisted_block.number; @@ -1549,7 +1546,7 @@ where /// has in memory. /// /// For finalized blocks, this will return `None`. 
- fn executed_block_by_hash(&self, hash: B256) -> ProviderResult> { + fn executed_block_by_hash(&self, hash: B256) -> ProviderResult>> { trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first let block = self.state.tree_state.executed_block_by_hash(hash).cloned(); @@ -1595,7 +1592,7 @@ where } /// Return block from database or in-memory state by hash. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { // check database first let mut block = self.provider.block_by_hash(hash)?; if block.is_none() { @@ -1754,7 +1751,10 @@ where /// Validate if block is correct and satisfies all the consensus rules that concern the header /// and block body itself. - fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + fn validate_block( + &self, + block: &SealedBlockWithSenders, + ) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { error!( target: "engine::tree", @@ -1951,7 +1951,7 @@ where /// If either of these are true, then this returns the height of the first block. Otherwise, /// this returns [`None`]. This should be used to check whether or not we should be sending a /// remove command to the persistence task. - fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option { + fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option { let NewCanonicalChain::Reorg { new, old: _ } = chain_update else { return None }; let BlockNumHash { number: new_num, hash: new_hash } = @@ -1978,7 +1978,7 @@ where /// Invoked when we the canonical chain has been updated. /// /// This is invoked on a valid forkchoice update, or if we can make the target block canonical. 
- fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) { + fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) { trace!(target: "engine::tree", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count(), "applying new chain update"); let start = Instant::now(); @@ -2030,7 +2030,7 @@ where } /// This reinserts any blocks in the new chain that do not already exist in the tree - fn reinsert_reorged_blocks(&mut self, new_chain: Vec) { + fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() { trace!(target: "engine::tree", num=?block.block.number, hash=?block.block.hash(), "Reinserting block into tree state"); @@ -2296,7 +2296,7 @@ where self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); - let executed: ExecutedBlock = ExecutedBlock { + let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), senders: Arc::new(block.senders), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), @@ -2636,7 +2636,7 @@ mod tests { use reth_engine_primitives::ForkchoiceStatus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; - use reth_primitives::{BlockExt, EthPrimitives}; + use reth_primitives::{Block, BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; @@ -2708,7 +2708,7 @@ mod tests { EthEngineTypes, EthereumEngineValidator, >, - to_tree_tx: Sender>>, + to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, blocks: Vec, action_rx: Receiver, From b3752cd2e8c94ad447b01cd5514793858a051adc Mon Sep 17 
00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Mon, 9 Dec 2024 22:38:26 -0500 Subject: [PATCH 45/70] feat: remove default types from headers stage (#13258) Co-authored-by: Arsenii Kulikov --- crates/stages/stages/src/stages/headers.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 7ca9cae590b6..2a104d7eb6b0 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -4,7 +4,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; use reth_consensus::HeaderValidator; -use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; +use reth_db::{table::Value, tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::DbTxMut, @@ -13,7 +13,7 @@ use reth_db_api::{ use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}; use reth_primitives::{NodePrimitives, SealedHeader, StaticFileSegment}; -use reth_primitives_traits::serde_bincode_compat; +use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader}; use reth_provider::{ providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, StaticFileProviderFactory, @@ -93,11 +93,9 @@ where /// database table. fn write_headers

(&mut self, provider: &P) -> Result where - P: DBProvider - + StaticFileProviderFactory< - Primitives: NodePrimitives, - >, + P: DBProvider + StaticFileProviderFactory, Downloader: HeaderDownloader

::BlockHeader>, + ::BlockHeader: Value + FullBlockHeader, { let total_headers = self.header_collector.len(); @@ -145,8 +143,8 @@ where self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { StageError::Block { block: Box::new(BlockWithParent::new( - header.parent_hash, - NumHash::new(header.number, header_hash), + header.parent_hash(), + NumHash::new(header.number(), header_hash), )), error: BlockErrorKind::Validation(error), } @@ -203,9 +201,9 @@ where impl Stage for HeaderStage where Provider: DBProvider + StaticFileProviderFactory, - Provider::Primitives: NodePrimitives, P: HeaderSyncGapProvider
::BlockHeader>, D: HeaderDownloader
::BlockHeader>, + ::BlockHeader: FullBlockHeader + Value, { /// Return the id of the stage fn id(&self) -> StageId { From d856c8e5bce7b099448a1dad756f4b41f4a6b70e Mon Sep 17 00:00:00 2001 From: Miguel Oliveira Date: Tue, 10 Dec 2024 05:39:43 -0300 Subject: [PATCH 46/70] feat(engine): add error logging in `state_hook` (#13252) Co-authored-by: Federico Gimenez --- crates/engine/tree/src/tree/root.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 53a881387e76..e2ed6aa14706 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -283,7 +283,9 @@ where let state_hook = StateHookSender::new(self.tx.clone()); move |state: &EvmState| { - let _ = state_hook.send(StateRootMessage::StateUpdate(state.clone())); + if let Err(error) = state_hook.send(StateRootMessage::StateUpdate(state.clone())) { + error!(target: "engine::root", ?error, "Failed to send state update"); + } } } From da99986ea2e9914943715cc635bc9209f179d492 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 13:06:39 +0400 Subject: [PATCH 47/70] feat: relax bounds for `EngineApiTreeHandler` (#13257) Co-authored-by: Matthias Seitz --- Cargo.lock | 3 + crates/blockchain-tree-api/Cargo.toml | 2 + crates/blockchain-tree-api/src/error.rs | 52 +++---- crates/consensus/beacon/src/engine/event.rs | 6 +- crates/engine/local/src/service.rs | 2 +- crates/engine/service/src/service.rs | 1 + crates/engine/tree/Cargo.toml | 2 + crates/engine/tree/src/engine.rs | 36 ++--- crates/engine/tree/src/tree/mod.rs | 151 +++++++++++--------- 9 files changed, 139 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08259a54aba2..3bcc998cf21d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6712,11 +6712,13 @@ dependencies = [ name = "reth-blockchain-tree-api" version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "reth-consensus", 
"reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-storage-errors", "thiserror 2.0.5", ] @@ -7408,6 +7410,7 @@ dependencies = [ "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml index b1c01f85938f..83ae378090b6 100644 --- a/crates/blockchain-tree-api/Cargo.toml +++ b/crates/blockchain-tree-api/Cargo.toml @@ -14,9 +14,11 @@ workspace = true reth-consensus.workspace = true reth-execution-errors.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-errors.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs index 4dd42c889a36..92866b4d4dad 100644 --- a/crates/blockchain-tree-api/src/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -1,11 +1,13 @@ //! Error handling for the blockchain tree +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::ConsensusError; use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; -use reth_primitives::SealedBlock; +use reth_primitives::{SealedBlock, SealedBlockFor}; +use reth_primitives_traits::{Block, BlockBody}; pub use reth_storage_errors::provider::ProviderError; /// Various error cases that can occur when a block violates tree assumptions. 
@@ -210,48 +212,48 @@ impl InsertBlockErrorData { } } -struct InsertBlockErrorDataTwo { - block: SealedBlock, +struct InsertBlockErrorDataTwo { + block: SealedBlockFor, kind: InsertBlockErrorKindTwo, } -impl std::fmt::Display for InsertBlockErrorDataTwo { +impl std::fmt::Display for InsertBlockErrorDataTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Failed to insert block (hash={}, number={}, parent_hash={}): {}", self.block.hash(), - self.block.number, - self.block.parent_hash, + self.block.number(), + self.block.parent_hash(), self.kind ) } } -impl std::fmt::Debug for InsertBlockErrorDataTwo { +impl std::fmt::Debug for InsertBlockErrorDataTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InsertBlockError") .field("error", &self.kind) .field("hash", &self.block.hash()) - .field("number", &self.block.number) - .field("parent_hash", &self.block.parent_hash) - .field("num_txs", &self.block.body.transactions.len()) + .field("number", &self.block.number()) + .field("parent_hash", &self.block.parent_hash()) + .field("num_txs", &self.block.body.transactions().len()) .finish_non_exhaustive() } } -impl core::error::Error for InsertBlockErrorDataTwo { +impl core::error::Error for InsertBlockErrorDataTwo { fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { Some(&self.kind) } } -impl InsertBlockErrorDataTwo { - const fn new(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Self { +impl InsertBlockErrorDataTwo { + const fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { Self { block, kind } } - fn boxed(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Box { + fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Box { Box::new(Self::new(block, kind)) } } @@ -259,36 +261,36 @@ impl InsertBlockErrorDataTwo { /// Error thrown when inserting a block failed because the block is considered invalid. 
#[derive(thiserror::Error)] #[error(transparent)] -pub struct InsertBlockErrorTwo { - inner: Box, +pub struct InsertBlockErrorTwo { + inner: Box>, } // === impl InsertBlockErrorTwo === -impl InsertBlockErrorTwo { +impl InsertBlockErrorTwo { /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Self { + pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { + pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlock) -> Self { + pub fn sender_recovery_error(block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) } /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlock) -> Self { + pub fn execution_error(error: BlockExecutionError, block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::Execution(error)) } /// Consumes the error and returns the block that resulted in the error #[inline] - pub fn into_block(self) -> SealedBlock { + pub fn into_block(self) -> SealedBlockFor { self.inner.block } @@ -300,19 +302,19 @@ impl InsertBlockErrorTwo { /// Returns the block that resulted in the error #[inline] - pub const fn block(&self) -> &SealedBlock { + pub const fn block(&self) -> &SealedBlockFor { &self.inner.block } /// Consumes the type and returns the block and error kind. 
#[inline] - pub fn split(self) -> (SealedBlock, InsertBlockErrorKindTwo) { + pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKindTwo) { let inner = *self.inner; (inner.block, inner.kind) } } -impl std::fmt::Debug for InsertBlockErrorTwo { +impl std::fmt::Debug for InsertBlockErrorTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.inner, f) } diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index b503e1e102af..acf056b3ff47 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -2,7 +2,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; use reth_engine_primitives::ForkchoiceStatus; -use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlockFor, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, @@ -15,9 +15,9 @@ pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc>, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc>, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index b4dd47c43abe..57fdc0c254ea 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -52,7 +52,7 @@ where /// Processes requests. 
/// /// This type is responsible for processing incoming requests. - handler: EngineApiRequestHandler>, + handler: EngineApiRequestHandler, N::Primitives>, /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. incoming_requests: EngineMessageStream, } diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index c6a87ea076f7..5dfe4184257b 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -39,6 +39,7 @@ type EngineServiceType = ChainOrchestrator< EngineHandler< EngineApiRequestHandler< EngineApiRequest<::Engine, ::Primitives>, + ::Primitives, >, EngineMessageStream<::Engine>, BasicBlockDownloader, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 67cb72850ae6..f428c8771cba 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -26,6 +26,7 @@ reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true @@ -108,6 +109,7 @@ test-utils = [ "reth-network-p2p/test-utils", "reth-payload-builder/test-utils", "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", "reth-provider/test-utils", "reth-prune-types", "reth-prune-types?/test-utils", diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 2f0415a1013f..9fa0a8c1d214 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -11,6 +11,7 @@ use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives_traits::Block; use std::{ collections::HashSet, fmt::Display, @@ -66,7 +67,7 
@@ impl EngineHandler { impl ChainHandler for EngineHandler where - T: EngineRequestHandler, + T: EngineRequestHandler, S: Stream + Send + Sync + Unpin + 'static, ::Item: Into, D: BlockDownloader, @@ -139,9 +140,11 @@ pub trait EngineRequestHandler: Send + Sync { type Event: Send; /// The request type this handler can process. type Request; + /// Type of the block sent in [`FromEngine::DownloadedBlocks`] variant. + type Block: Block; /// Informs the handler about an event from the [`EngineHandler`]. - fn on_event(&mut self, event: FromEngine); + fn on_event(&mut self, event: FromEngine); /// Advances the handler. fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; @@ -167,31 +170,32 @@ pub trait EngineRequestHandler: Send + Sync { /// In case required blocks are missing, the handler will request them from the network, by emitting /// a download request upstream. #[derive(Debug)] -pub struct EngineApiRequestHandler { +pub struct EngineApiRequestHandler { /// channel to send messages to the tree to execute the payload. - to_tree: Sender>, + to_tree: Sender>, /// channel to receive messages from the tree. - from_tree: UnboundedReceiver, + from_tree: UnboundedReceiver>, } -impl EngineApiRequestHandler { +impl EngineApiRequestHandler { /// Creates a new `EngineApiRequestHandler`. 
pub const fn new( - to_tree: Sender>, - from_tree: UnboundedReceiver, + to_tree: Sender>, + from_tree: UnboundedReceiver>, ) -> Self { Self { to_tree, from_tree } } } -impl EngineRequestHandler for EngineApiRequestHandler +impl EngineRequestHandler for EngineApiRequestHandler where Request: Send, { - type Event = BeaconConsensusEngineEvent; + type Event = BeaconConsensusEngineEvent; type Request = Request; + type Block = N::Block; - fn on_event(&mut self, event: FromEngine) { + fn on_event(&mut self, event: FromEngine) { // delegate to the tree let _ = self.to_tree.send(event); } @@ -263,7 +267,7 @@ impl From> for EngineA } impl From> - for FromEngine> + for FromEngine, N::Block> { fn from(req: EngineApiRequest) -> Self { Self::Request(req) @@ -297,16 +301,16 @@ impl From> for EngineApiEvent { +pub enum FromEngine { /// Event from the top level orchestrator. Event(FromOrchestrator), /// Request from the engine. Request(Req), /// Downloaded blocks from the network. - DownloadedBlocks(Vec), + DownloadedBlocks(Vec>), } -impl Display for FromEngine { +impl Display for FromEngine { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Event(ev) => write!(f, "Event({ev:?})"), @@ -318,7 +322,7 @@ impl Display for FromEngine { } } -impl From for FromEngine { +impl From for FromEngine { fn from(event: FromOrchestrator) -> Self { Self::Event(event) } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 29c382c28856..234a96a47d07 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -5,7 +5,7 @@ use crate::{ persistence::PersistenceHandle, tree::metrics::EngineApiMetrics, }; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -36,9 +36,10 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use 
reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, + EthPrimitives, GotExpected, NodePrimitives, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, }; +use reth_primitives_traits::Block; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, @@ -485,9 +486,9 @@ where /// them one by one so that we can handle incoming engine API in between and don't become /// unresponsive. This can happen during live sync transition where we're trying to close the /// gap (up to 3 epochs of blocks in the worst case). - incoming_tx: Sender>>, + incoming_tx: Sender, N::Block>>, /// Incoming engine API requests. - incoming: Receiver>>, + incoming: Receiver, N::Block>>, /// Outgoing events that are emitted to the handler. outgoing: UnboundedSender>, /// Channels to the persistence layer. @@ -540,24 +541,20 @@ where impl EngineApiTreeHandler where - N: NodePrimitives< - Block = reth_primitives::Block, - BlockHeader = reth_primitives::Header, - BlockBody = reth_primitives::BlockBody, - Receipt = reth_primitives::Receipt, - >, + N: NodePrimitives, P: DatabaseProviderFactory + BlockReader + StateProviderFactory - + StateReader + + StateReader + StateCommitmentProvider + HashedPostStateProvider + Clone + 'static, -

::Provider: BlockReader, +

::Provider: + BlockReader, E: BlockExecutorProvider, T: EngineTypes, - V: EngineValidator, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[expect(clippy::too_many_arguments)] @@ -620,7 +617,8 @@ where config: TreeConfig, invalid_block_hook: Box>, kind: EngineApiKind, - ) -> (Sender>>, UnboundedReceiver>) { + ) -> (Sender, N::Block>>, UnboundedReceiver>) + { let best_block_number = provider.best_block_number().unwrap_or(0); let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default(); @@ -658,7 +656,7 @@ where } /// Returns a new [`Sender`] to send messages to this type. - pub fn sender(&self) -> Sender>> { + pub fn sender(&self) -> Sender, N::Block>> { self.incoming_tx.clone() } @@ -698,7 +696,7 @@ where /// block request processing isn't blocked for a long time. fn on_downloaded( &mut self, - mut blocks: Vec, + mut blocks: Vec>, ) -> Result, InsertBlockFatalError> { if blocks.is_empty() { // nothing to execute @@ -797,7 +795,7 @@ where let block_hash = block.hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash; + lowest_buffered_ancestor = block.parent_hash(); } // now check the block itself @@ -862,11 +860,11 @@ where return Ok(None) }; - let new_head_number = new_head_block.block.number; + let new_head_number = new_head_block.block.number(); let mut current_canonical_number = self.state.tree_state.current_canonical_head.number; let mut new_chain = vec![new_head_block.clone()]; - let mut current_hash = new_head_block.block.parent_hash; + let mut current_hash = new_head_block.block.parent_hash(); let mut current_number = new_head_number - 1; // Walk back the new chain until we reach a block we know about @@ -875,7 +873,7 @@ where // that are _above_ the current canonical head. while current_number > current_canonical_number { if let Some(block) = self.executed_block_by_hash(current_hash)? 
{ - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); current_number -= 1; new_chain.push(block); } else { @@ -904,7 +902,7 @@ where while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); - old_hash = block.block.header.parent_hash; + old_hash = block.block.header.parent_hash(); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -920,7 +918,7 @@ where // a common ancestor (fork block) is reached. while old_hash != current_hash { if let Some(block) = self.executed_block_by_hash(old_hash)? { - old_hash = block.block.header.parent_hash; + old_hash = block.block.header.parent_hash(); old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain @@ -929,7 +927,7 @@ where } if let Some(block) = self.executed_block_by_hash(current_hash)? { - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); new_chain.push(block); } else { // This shouldn't happen as we've already walked this path @@ -958,10 +956,10 @@ where return Ok(false) } // We already passed the canonical head - if current_block.number <= canonical_head.number { + if current_block.number() <= canonical_head.number { break } - current_hash = current_block.parent_hash; + current_hash = current_block.parent_hash(); } // verify that the given hash is not already part of canonical chain stored in memory @@ -1040,7 +1038,7 @@ where // to return an error ProviderError::HeaderNotFound(state.head_block_hash.into()) })?; - let updated = self.process_payload_attributes(attr, &tip, state, version); + let updated = self.process_payload_attributes(attr, tip.header(), state, version); return Ok(TreeOutcome::new(updated)) } @@ -1069,13 +1067,13 @@ where // 3. 
check if the head is already part of the canonical chain if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) { - debug!(target: "engine::tree", head = canonical_header.number, "fcu head block is already canonical"); + debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical"); // For OpStack the proposers are allowed to reorg their own chain at will, so we need to // always trigger a new payload job if requested. if self.engine_kind.is_opstack() { if let Some(attr) = attrs { - debug!(target: "engine::tree", head = canonical_header.number, "handling payload attributes for canonical head"); + debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); let updated = self.process_payload_attributes(attr, &canonical_header, state, version); return Ok(TreeOutcome::new(updated)) @@ -1128,9 +1126,10 @@ where /// received in time. /// /// Returns an error if the engine channel is disconnected. + #[expect(clippy::type_complexity)] fn try_recv_engine_message( &self, - ) -> Result>>, RecvError> { + ) -> Result, N::Block>>, RecvError> { if self.persistence_state.in_progress() { // try to receive the next request with a timeout to not block indefinitely match self.incoming.recv_timeout(std::time::Duration::from_millis(500)) { @@ -1208,7 +1207,7 @@ where /// Handles a message from the engine. fn on_engine_message( &mut self, - msg: FromEngine>, + msg: FromEngine, N::Block>, ) -> Result<(), InsertBlockFatalError> { match msg { FromEngine::Event(event) => match event { @@ -1384,7 +1383,7 @@ where .state .buffer .block(&sync_target_state.finalized_block_hash) - .map(|block| block.number); + .map(|block| block.number()); // The block number that the backfill finished at - if the progress or newest // finalized is None then we can't check the distance anyways. 
@@ -1505,15 +1504,15 @@ where debug!(target: "engine::tree", ?last_persisted_number, ?canonical_head_number, ?target_number, ?current_hash, "Returning canonical blocks to persist"); while let Some(block) = self.state.tree_state.blocks_by_hash.get(¤t_hash) { - if block.block.number <= last_persisted_number { + if block.block.number() <= last_persisted_number { break; } - if block.block.number <= target_number { + if block.block.number() <= target_number { blocks_to_persist.push(block.clone()); } - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); } // reverse the order so that the oldest block comes first @@ -1579,7 +1578,10 @@ where } /// Return sealed block from database or in-memory state by hash. - fn sealed_header_by_hash(&self, hash: B256) -> ProviderResult> { + fn sealed_header_by_hash( + &self, + hash: B256, + ) -> ProviderResult>> { // check memory first let block = self.state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone().header); @@ -1649,7 +1651,7 @@ where self.state .buffer .lowest_ancestor(&hash) - .map(|block| block.parent_hash) + .map(|block| block.parent_hash()) .unwrap_or_else(|| hash) } @@ -1696,7 +1698,7 @@ where // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal // PoW block, which we need to identify by looking at the parent's block difficulty if let Some(parent) = self.block_by_hash(parent_hash)? { - if !parent.is_zero_difficulty() { + if !parent.header().difficulty().is_zero() { parent_hash = B256::ZERO; } } @@ -1823,8 +1825,8 @@ where /// Returns an error if sender recovery failed or inserting into the buffer failed. 
fn buffer_block_without_senders( &mut self, - block: SealedBlock, - ) -> Result<(), InsertBlockErrorTwo> { + block: SealedBlockFor, + ) -> Result<(), InsertBlockErrorTwo> { match block.try_seal_with_senders() { Ok(block) => self.buffer_block(block), Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), @@ -1832,7 +1834,10 @@ where } /// Pre-validates the block and inserts it into the buffer. - fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockErrorTwo> { + fn buffer_block( + &mut self, + block: SealedBlockWithSenders, + ) -> Result<(), InsertBlockErrorTwo> { if let Err(err) = self.validate_block(&block) { return Err(InsertBlockErrorTwo::consensus_error(err, block.block)) } @@ -1886,7 +1891,7 @@ where // if we have buffered the finalized block, we should check how far // we're off exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number); + self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number()); } // If this is invoked after we downloaded a block we can check if this block is the @@ -2011,7 +2016,7 @@ where self.canonical_in_memory_state.set_canonical_head(tip.clone()); // Update metrics based on new tip - self.metrics.tree.canonical_chain_height.set(tip.number as f64); + self.metrics.tree.canonical_chain_height.set(tip.number() as f64); // sends an event to all active listeners about the new canonical chain self.canonical_in_memory_state.notify_canon_state(notification); @@ -2033,7 +2038,7 @@ where fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() { - trace!(target: "engine::tree", num=?block.block.number, hash=?block.block.hash(), "Reinserting block into tree state"); + trace!(target: "engine::tree", num=?block.block.number(), hash=?block.block.hash(), "Reinserting block into tree state"); 
self.state.tree_state.insert_executed(block); } } @@ -2086,10 +2091,10 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number,), target = "engine::tree")] + #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] fn on_downloaded_block( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, ) -> Result, InsertBlockFatalError> { let block_num_hash = block.num_hash(); let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); @@ -2147,8 +2152,8 @@ where fn insert_block_without_senders( &mut self, - block: SealedBlock, - ) -> Result { + block: SealedBlockFor, + ) -> Result> { match block.try_seal_with_senders() { Ok(block) => self.insert_block(block), Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), @@ -2157,17 +2162,17 @@ where fn insert_block( &mut self, - block: SealedBlockWithSenders, - ) -> Result { + block: SealedBlockWithSenders, + ) -> Result> { self.insert_block_inner(block.clone()) .map_err(|kind| InsertBlockErrorTwo::new(block.block, kind)) } fn insert_block_inner( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, ) -> Result { - debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash, state_root = ?block.state_root, "Inserting new block into tree"); + debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); if self.block_by_hash(block.hash())?.is_some() { return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) @@ -2179,14 +2184,14 @@ where // validate block consensus rules self.validate_block(&block)?; - 
trace!(target: "engine::tree", block=?block.num_hash(), parent=?block.parent_hash, "Fetching block state provider"); - let Some(state_provider) = self.state_provider(block.parent_hash)? else { + trace!(target: "engine::tree", block=?block.num_hash(), parent=?block.parent_hash(), "Fetching block state provider"); + let Some(state_provider) = self.state_provider(block.parent_hash())? else { // we don't have the state required to execute this block, buffering it and find the // missing parent block let missing_ancestor = self .state .buffer - .lowest_ancestor(&block.parent_hash) + .lowest_ancestor(&block.parent_hash()) .map(|block| block.parent_num_hash()) .unwrap_or_else(|| block.parent_num_hash()); @@ -2199,9 +2204,9 @@ where }; // now validate against the parent - let parent_block = self.sealed_header_by_hash(block.parent_hash)?.ok_or_else(|| { + let parent_block = self.sealed_header_by_hash(block.parent_hash())?.ok_or_else(|| { InsertBlockErrorKindTwo::Provider(ProviderError::HeaderNotFound( - block.parent_hash.into(), + block.parent_hash().into(), )) })?; if let Err(e) = self.consensus.validate_header_against_parent(&block, &parent_block) { @@ -2212,7 +2217,7 @@ where trace!(target: "engine::tree", block=?block.num_hash(), "Executing block"); let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); - let block_number = block.number; + let block_number = block.number(); let block_hash = block.hash(); let sealed_block = Arc::new(block.block.clone()); let block = block.unseal(); @@ -2260,7 +2265,7 @@ where let persistence_in_progress = self.persistence_state.in_progress(); if !persistence_in_progress { state_root_result = match self - .compute_state_root_parallel(block.parent_hash, &hashed_state) + .compute_state_root_parallel(block.header().parent_hash(), &hashed_state) { Ok((state_root, trie_output)) => Some((state_root, trie_output)), Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { @@ -2278,7 
+2283,7 @@ where state_provider.state_root_with_updates(hashed_state.clone())? }; - if state_root != block.state_root { + if state_root != block.header().state_root() { // call post-block hook self.invalid_block_hook.on_invalid_block( &parent_block, @@ -2287,7 +2292,7 @@ where Some((&trie_output, state_root)), ); return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), + GotExpected { got: state_root, expected: block.header().state_root() }.into(), ) .into()) } @@ -2304,7 +2309,7 @@ where trie: Arc::new(trie_output), }; - if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash { + if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash() { debug!(target: "engine::tree", pending = ?executed.block().num_hash() ,"updating pending block"); // if the parent is the canonical head, we can insert the block as the pending block self.canonical_in_memory_state.set_pending_block(executed.clone()); @@ -2375,7 +2380,7 @@ where /// Returns the proper payload status response if the block is invalid. fn on_insert_block_error( &mut self, - error: InsertBlockErrorTwo, + error: InsertBlockErrorTwo, ) -> Result { let (block, error) = error.split(); @@ -2386,12 +2391,12 @@ where // If the error was due to an invalid payload, the payload is added to the // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is // returned. - warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number, %validation_err, "Invalid block error on new payload"); + warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number(), %validation_err, "Invalid block error on new payload"); let latest_valid_hash = if validation_err.is_block_pre_merge() { // zero hash must be returned if block is pre-merge Some(B256::ZERO) } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash)? 
+ self.latest_valid_hash_for_invalid_payload(block.parent_hash())? }; // keep track of the invalid header @@ -2403,7 +2408,10 @@ where } /// Attempts to find the header for the given block hash if it is canonical. - pub fn find_canonical_header(&self, hash: B256) -> Result, ProviderError> { + pub fn find_canonical_header( + &self, + hash: B256, + ) -> Result>, ProviderError> { let mut canonical = self.canonical_in_memory_state.header_by_hash(hash); if canonical.is_none() { @@ -2434,7 +2442,7 @@ where { // we're also persisting the finalized block on disk so we can reload it on // restart this is required by optimism which queries the finalized block: - let _ = self.persistence.save_finalized_block_number(finalized.number); + let _ = self.persistence.save_finalized_block_number(finalized.number()); self.canonical_in_memory_state.set_finalized(finalized); } } @@ -2462,7 +2470,7 @@ where if Some(safe.num_hash()) != self.canonical_in_memory_state.get_safe_num_hash() { // we're also persisting the safe block on disk so we can reload it on // restart this is required by optimism which queries the safe block: - let _ = self.persistence.save_safe_block_number(safe.number); + let _ = self.persistence.save_safe_block_number(safe.number()); self.canonical_in_memory_state.set_safe(safe); } } @@ -2537,7 +2545,7 @@ where fn process_payload_attributes( &self, attrs: T::PayloadAttributes, - head: &Header, + head: &N::BlockHeader, state: ForkchoiceState, version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { @@ -2626,6 +2634,7 @@ pub enum AdvancePersistenceError { mod tests { use super::*; use crate::persistence::PersistenceAction; + use alloy_consensus::Header; use alloy_primitives::Bytes; use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; @@ -2708,7 +2717,7 @@ mod tests { EthEngineTypes, EthereumEngineValidator, >, - to_tree_tx: Sender>>, + to_tree_tx: Sender, Block>>, from_tree_rx: UnboundedReceiver, blocks: Vec, 
action_rx: Receiver, @@ -2843,7 +2852,7 @@ mod tests { fn insert_block( &mut self, block: SealedBlockWithSenders, - ) -> Result { + ) -> Result> { let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); self.extend_execution_outcome([execution_outcome]); self.tree.provider.add_state_root(block.state_root); From 15470b43502da858ccef99af203e8408b1422849 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 10 Dec 2024 04:29:51 -0500 Subject: [PATCH 48/70] feat: make bodies downloader generic over header (#13259) --- crates/net/downloaders/src/bodies/bodies.rs | 93 ++++++++++---------- crates/net/downloaders/src/bodies/noop.rs | 14 ++- crates/net/downloaders/src/bodies/queue.rs | 21 +++-- crates/net/downloaders/src/bodies/request.rs | 22 ++--- crates/net/downloaders/src/bodies/task.rs | 21 +++-- crates/net/p2p/src/bodies/downloader.rs | 7 +- crates/net/p2p/src/bodies/response.rs | 5 +- crates/node/builder/src/setup.rs | 2 +- crates/stages/stages/src/stages/bodies.rs | 17 ++-- crates/stages/stages/src/stages/utils.rs | 2 +- 10 files changed, 112 insertions(+), 92 deletions(-) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index bdf2aca9c778..54026070ec8b 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -35,11 +35,11 @@ use tracing::info; /// All blocks in a batch are fetched at the same time. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] -pub struct BodiesDownloader { +pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc>, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -57,11 +57,11 @@ pub struct BodiesDownloader { /// The latest block number returned. 
latest_queued_block_number: Option, /// Requests in progress - in_progress_queue: BodiesRequestQueue, + in_progress_queue: BodiesRequestQueue, /// Buffered responses - buffered_responses: BinaryHeap>, + buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec>, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } @@ -69,7 +69,7 @@ pub struct BodiesDownloader { impl BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. fn next_headers_request(&self) -> DownloadResult>>> { @@ -193,14 +193,16 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec>) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } /// Removes the next response from the buffer. - fn pop_buffered_response(&mut self) -> Option> { + fn pop_buffered_response( + &mut self, + ) -> Option> { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); @@ -210,13 +212,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response( - &mut self, - response: Vec>, - ) { + fn buffer_bodies_response(&mut self, response: Vec>) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::>(); + response.capacity() * mem::size_of::>(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -230,9 +229,7 @@ where } /// Returns a response if it's first block number matches the next expected. 
- fn try_next_buffered( - &mut self, - ) -> Option>> { + fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -258,9 +255,7 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch( - &mut self, - ) -> Option>> { + fn try_split_next_batch(&mut self) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -292,12 +287,17 @@ where Self: BodyDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader<::Body> { + pub fn into_task( + self, + ) -> TaskDownloader<::Header, ::Body> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader<::Body> + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header, ::Body> where S: TaskSpawner, { @@ -308,8 +308,9 @@ where impl BodyDownloader for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider

+ Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { + type Header = Provider::Header; type Body = B::Body; /// Set a new download range (exclusive). @@ -358,9 +359,9 @@ where impl Stream for BodiesDownloader where B: BodiesClient + 'static, - Provider: HeaderProvider
+ Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -442,13 +443,28 @@ where } #[derive(Debug)] -struct OrderedBodiesResponse { - resp: Vec>, +struct OrderedBodiesResponse { + resp: Vec>, /// The total size of the response in bytes size: usize, } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse { + #[inline] + fn len(&self) -> usize { + self.resp.len() + } + + /// Returns the size of the response in bytes + /// + /// See [`BlockResponse::size`] + #[inline] + const fn size(&self) -> usize { + self.size + } +} + +impl OrderedBodiesResponse { /// Returns the block number of the first element /// /// # Panics @@ -464,36 +480,23 @@ impl OrderedBodiesResponse { fn block_range(&self) -> RangeInclusive { self.first_block_number()..=self.resp.last().expect("is not empty").block_number() } - - #[inline] - fn len(&self) -> usize { - self.resp.len() - } - - /// Returns the size of the response in bytes - /// - /// See [`BlockResponse::size`] - #[inline] - const fn size(&self) -> usize { - self.size - } } -impl PartialEq for OrderedBodiesResponse { +impl PartialEq for OrderedBodiesResponse { fn eq(&self, other: &Self) -> bool { self.first_block_number() == other.first_block_number() } } -impl Eq for OrderedBodiesResponse {} +impl Eq for OrderedBodiesResponse {} -impl PartialOrd for OrderedBodiesResponse { +impl PartialOrd for OrderedBodiesResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedBodiesResponse { +impl Ord for OrderedBodiesResponse { fn cmp(&self, other: &Self) -> Ordering { self.first_block_number().cmp(&other.first_block_number()).reverse() } @@ -573,7 +576,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc>, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git 
a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index dd3e6e9691b9..b7a9431a4d7b 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -9,18 +9,24 @@ use std::{fmt::Debug, ops::RangeInclusive}; /// A [`BodyDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopBodiesDownloader(std::marker::PhantomData); +pub struct NoopBodiesDownloader { + _header: std::marker::PhantomData, + _body: std::marker::PhantomData, +} -impl BodyDownloader for NoopBodiesDownloader { +impl + BodyDownloader for NoopBodiesDownloader +{ type Body = B; + type Header = H; fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } -impl Stream for NoopBodiesDownloader { - type Item = Result>, DownloadError>; +impl Stream for NoopBodiesDownloader { + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 5f1e8b059cf8..ed8c425e6114 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -1,5 +1,6 @@ use super::request::BodiesRequestFuture; use crate::metrics::BodyDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; @@ -19,18 +20,19 @@ use std::{ /// The wrapper around [`FuturesUnordered`] that keeps information /// about the blocks currently being requested. #[derive(Debug)] -pub(crate) struct BodiesRequestQueue { +pub(crate) struct BodiesRequestQueue { /// Inner body request queue. - inner: FuturesUnordered>, + inner: FuturesUnordered>, /// The downloader metrics. metrics: BodyDownloaderMetrics, /// Last requested block number. 
pub(crate) last_requested_block_number: Option, } -impl BodiesRequestQueue +impl BodiesRequestQueue where B: BodiesClient + 'static, + H: BlockHeader, { /// Create new instance of request queue. pub(crate) fn new(metrics: BodyDownloaderMetrics) -> Self { @@ -58,15 +60,15 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc>, - request: Vec, + consensus: Arc>, + request: Vec>, ) { // Set last max requested block number self.last_requested_block_number = request .last() .map(|last| match self.last_requested_block_number { - Some(num) => last.number.max(num), - None => last.number, + Some(num) => last.number().max(num), + None => last.number(), }) .or(self.last_requested_block_number); // Create request and push into the queue. @@ -76,11 +78,12 @@ where } } -impl Stream for BodiesRequestQueue +impl Stream for BodiesRequestQueue where + H: BlockHeader + Send + Sync + Unpin + 'static, B: BodiesClient + 'static, { - type Item = DownloadResult>>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 28cfdb61b7cd..92f46fa6fdd6 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -38,30 +38,31 @@ use std::{ /// All errors regarding the response cause the peer to get penalized, meaning that adversaries /// that try to give us bodies that do not match the requested order are going to be penalized /// and eventually disconnected. -pub(crate) struct BodiesRequestFuture { +pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. response_metrics: ResponseMetrics, // Headers to download. 
The collection is shrunk as responses are buffered. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// Internal buffer for all blocks - buffer: Vec>, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, } -impl BodiesRequestFuture +impl BodiesRequestFuture where B: BodiesClient + 'static, + H: BlockHeader, { /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -76,7 +77,7 @@ where } } - pub(crate) fn with_headers(mut self, headers: Vec) -> Self { + pub(crate) fn with_headers(mut self, headers: Vec>) -> Self { self.buffer.reserve_exact(headers.len()); self.pending_headers = VecDeque::from(headers); // Submit the request only if there are any headers to download. @@ -192,7 +193,7 @@ where if let Err(error) = self.consensus.validate_block_pre_execution(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); - let number = block.number; + let number = block.number(); self.pending_headers.push_front(block.header); return Err(DownloadError::BodyValidation { hash, @@ -213,11 +214,12 @@ where } } -impl Future for BodiesRequestFuture +impl Future for BodiesRequestFuture where + H: BlockHeader + Unpin + Send + Sync + 'static, B: BodiesClient + 'static, { - type Output = DownloadResult>>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 89af9813e3cc..9377be78676c 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -24,15 +24,15 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. 
#[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream>, + from_downloader: ReceiverStream>, to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's /// connected to that task. /// @@ -64,7 +64,7 @@ impl TaskDownloader { /// ``` pub fn spawn(downloader: T) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader
+ 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -73,7 +73,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader
+ 'static, S: TaskSpawner, { let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE); @@ -91,7 +91,10 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl + BodyDownloader for TaskDownloader +{ + type Header = H; type Body = B; fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { @@ -100,8 +103,8 @@ impl BodyDownloader for TaskDownloader } } -impl Stream for TaskDownloader { - type Item = BodyDownloaderResult; +impl Stream for TaskDownloader { + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -111,7 +114,7 @@ impl Stream for TaskDownloader { /// A [`BodyDownloader`] that runs on its own task struct SpawnedDownloader { updates: UnboundedReceiverStream>, - bodies_tx: PollSender>, + bodies_tx: PollSender>, downloader: T, } diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index 06f35fc9bd69..b80a308d8a18 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -5,7 +5,7 @@ use futures::Stream; use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. /// @@ -13,8 +13,11 @@ pub type BodyDownloaderResult = DownloadResult> + Unpin + Send + Sync + Stream> + Unpin { + /// The type of header that can be returned in a blck + type Header: Debug + Send + Sync + Unpin + 'static; + /// The type of the body that is being downloaded. 
type Body: Debug + Send + Sync + Unpin + 'static; diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 02534ea09637..1b415246f544 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,10 +1,11 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; -use reth_primitives_traits::{BlockHeader, InMemorySize}; +use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) Full(SealedBlock), /// The empty block response diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 0a0e4f10dbc9..6dff28bd39b7 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -87,7 +87,7 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader
> + 'static, - B: BodyDownloader> + 'static, + B: BodyDownloader
, Body = BodyTy> + 'static, Executor: BlockExecutorProvider, N::Primitives: NodePrimitives, { diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 88a1b96e249e..0f311b1bc9e0 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -5,7 +5,7 @@ use reth_db::{tables, transaction::DbTx}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; -use reth_primitives_traits::{Block, BlockBody}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader}; use reth_provider::{ providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, StorageLocation, @@ -56,7 +56,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>>, + buffer: Option>>, } impl BodyStage { @@ -72,9 +72,7 @@ impl BodyStage { unwind_block: Option, ) -> Result<(), StageError> where - Provider: DBProvider - + BlockReader
- + StaticFileProviderFactory, + Provider: DBProvider + BlockReader + StaticFileProviderFactory, { // Get id for the next tx_num of zero if there are no transactions. let next_tx_num = provider @@ -151,9 +149,9 @@ where Provider: DBProvider + StaticFileProviderFactory + StatsReader - + BlockReader
- + BlockWriter>, - D: BodyDownloader>, + + BlockReader + + BlockWriter>, + D: BodyDownloader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -764,6 +762,7 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { + type Header = Header; type Body = BlockBody; fn set_download_range( @@ -786,7 +785,7 @@ mod tests { } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index c2a7c6ede02f..169d556348b2 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -258,7 +258,7 @@ pub(crate) fn missing_static_data_error( segment: StaticFileSegment, ) -> Result where - Provider: BlockReader
+ StaticFileProviderFactory, + Provider: BlockReader + StaticFileProviderFactory, { let mut last_block = static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); From fdc9b3188a058288e9d25d69f94831cc0441a8bb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 10 Dec 2024 11:07:15 +0100 Subject: [PATCH 49/70] chore: use trait fn for blob gas (#13261) --- crates/primitives-traits/src/block/body.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index ed60796ce1bc..20f1cb9c159a 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloc::{fmt, vec::Vec}; use alloy_consensus::Transaction; -use alloy_eips::{eip2718::Encodable2718, eip4844::DATA_GAS_PER_BLOB, eip4895::Withdrawals}; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; use alloy_primitives::{Bytes, B256}; /// Helper trait that unifies all behaviour required by transaction to support full node operations. @@ -73,12 +73,7 @@ pub trait BlockBody: /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. fn blob_gas_used(&self) -> u64 { - // TODO(mattss): simplify after - self.transactions() - .iter() - .filter_map(|tx| tx.blob_versioned_hashes()) - .map(|hashes| hashes.len() as u64 * DATA_GAS_PER_BLOB) - .sum() + self.transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() } /// Returns an iterator over all blob versioned hashes in the block body. 
From 36c0142e583668b7dd58428002ad5d1773d8c4a6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 10 Dec 2024 12:10:47 +0100 Subject: [PATCH 50/70] chore: release 1.1.3 (#13262) --- Cargo.lock | 244 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bcc998cf21d..92b089a0eafd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2647,7 +2647,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5532,7 +5532,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.1.2" +version = "1.1.3" dependencies = [ "clap", "reth-cli-util", @@ -6479,7 +6479,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6552,7 +6552,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6580,7 +6580,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6636,7 +6636,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6672,7 +6672,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6710,7 +6710,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6725,7 +6725,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6754,7 +6754,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = 
"1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6775,7 +6775,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-genesis", "clap", @@ -6788,7 +6788,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.2" +version = "1.1.3" dependencies = [ "ahash", "alloy-consensus", @@ -6856,7 +6856,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-tasks", "tokio", @@ -6865,7 +6865,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6883,7 +6883,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6906,7 +6906,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.2" +version = "1.1.3" dependencies = [ "convert_case", "proc-macro2", @@ -6917,7 +6917,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "eyre", @@ -6933,7 +6933,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6946,7 +6946,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6963,7 +6963,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6986,7 +6986,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7027,7 +7027,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ 
"alloy-consensus", "alloy-genesis", @@ -7056,7 +7056,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7085,7 +7085,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7102,7 +7102,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7129,7 +7129,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7153,7 +7153,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7181,7 +7181,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7220,7 +7220,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7268,7 +7268,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.2" +version = "1.1.3" dependencies = [ "aes", "alloy-primitives", @@ -7298,7 +7298,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7330,7 +7330,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7350,7 +7350,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.2" +version = "1.1.3" dependencies = [ "futures", "pin-project", @@ -7378,7 +7378,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.2" +version = "1.1.3" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -7434,7 +7434,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7466,7 +7466,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7478,7 +7478,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-eips", @@ -7515,7 +7515,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7540,7 +7540,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.2" +version = "1.1.3" dependencies = [ "clap", "eyre", @@ -7551,7 +7551,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7566,7 +7566,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7586,7 +7586,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7606,7 +7606,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7631,11 +7631,11 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.1.2" +version = "1.1.3" [[package]] name = "reth-etl" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "rayon", @@ -7645,7 +7645,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7673,7 +7673,7 @@ dependencies = [ [[package]] 
name = "reth-evm-ethereum" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7696,7 +7696,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7711,7 +7711,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7731,7 +7731,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7774,7 +7774,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "eyre", @@ -7807,7 +7807,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7824,7 +7824,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "serde", "serde_json", @@ -7833,7 +7833,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7859,7 +7859,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.2" +version = "1.1.3" dependencies = [ "async-trait", "bytes", @@ -7881,7 +7881,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.2" +version = "1.1.3" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7902,7 +7902,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.2" +version = "1.1.3" dependencies = [ "bindgen", "cc", @@ -7910,7 +7910,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.2" +version = "1.1.3" dependencies = [ "futures", "metrics", @@ -7921,14 +7921,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.2" +version = "1.1.3" dependencies 
= [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.2" +version = "1.1.3" dependencies = [ "futures-util", "if-addrs", @@ -7942,7 +7942,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8005,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8027,7 +8027,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8049,7 +8049,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8065,7 +8065,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -8078,7 +8078,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.2" +version = "1.1.3" dependencies = [ "anyhow", "bincode", @@ -8096,7 +8096,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8116,7 +8116,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8181,7 +8181,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8232,7 +8232,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8281,7 +8281,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8304,7 +8304,7 @@ 
dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.1.2" +version = "1.1.3" dependencies = [ "eyre", "http", @@ -8327,7 +8327,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8338,7 +8338,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8358,7 +8358,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8409,7 +8409,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8428,7 +8428,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8458,7 +8458,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8469,7 +8469,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8523,7 +8523,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8559,7 +8559,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8583,7 +8583,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8628,7 +8628,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.2" +version = "1.1.3" dependencies = [ "reth-codecs", "reth-db-api", @@ 
-8639,7 +8639,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8661,7 +8661,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types-engine", "async-trait", @@ -8674,7 +8674,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8692,7 +8692,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8701,7 +8701,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8711,7 +8711,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8758,7 +8758,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8786,7 +8786,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8836,7 +8836,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8868,7 +8868,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -8888,7 +8888,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8906,7 +8906,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.2" +version = "1.1.3" dependencies = [ 
"alloy-consensus", "alloy-dyn-abi", @@ -8978,7 +8978,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -9002,7 +9002,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9021,7 +9021,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9071,7 +9071,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9109,7 +9109,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9152,7 +9152,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9194,7 +9194,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-rpc-types-engine", "http", @@ -9211,7 +9211,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9226,7 +9226,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9243,7 +9243,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9294,7 +9294,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9323,7 +9323,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.2" +version = "1.1.3" dependencies 
= [ "alloy-primitives", "arbitrary", @@ -9340,7 +9340,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "assert_matches", @@ -9364,7 +9364,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "clap", @@ -9375,7 +9375,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9399,7 +9399,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9412,7 +9412,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.2" +version = "1.1.3" dependencies = [ "auto_impl", "dyn-clone", @@ -9429,7 +9429,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9443,7 +9443,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.2" +version = "1.1.3" dependencies = [ "tokio", "tokio-stream", @@ -9452,7 +9452,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.2" +version = "1.1.3" dependencies = [ "clap", "eyre", @@ -9466,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9513,7 +9513,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9542,7 +9542,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9572,7 +9572,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9601,7 
+9601,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9628,7 +9628,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.2" +version = "1.1.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9652,7 +9652,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.1.2" +version = "1.1.3" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index beef7d330dd4..ab2fba7b99e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.2" +version = "1.1.3" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" From d97449dae495a2243000078fa30b3b164ef5891b Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 17:50:16 +0400 Subject: [PATCH 51/70] fix: bounds for BundleApi (#13267) --- crates/node/builder/src/rpc.rs | 12 +++++++++--- crates/optimism/node/src/node.rs | 2 ++ crates/rpc/rpc-builder/src/lib.rs | 28 ++++++++++++++++++++-------- crates/rpc/rpc/src/eth/bundle.rs | 8 ++++++-- 4 files changed, 37 insertions(+), 13 deletions(-) diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index c8e08078bb98..32123b194e6b 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -18,7 +18,7 @@ use reth_node_core::{ version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; -use reth_primitives::EthPrimitives; +use reth_primitives::{EthPrimitives, PooledTransactionsElement}; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -33,6 +33,7 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::sync::Arc; use 
crate::EthApiBuilderCtx; @@ -403,7 +404,9 @@ where impl RpcAddOns where - N: FullNodeComponents, + N: FullNodeComponents< + Pool: TransactionPool>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners @@ -531,7 +534,10 @@ where impl NodeAddOns for RpcAddOns where - N: FullNodeComponents>, + N: FullNodeComponents< + Types: ProviderNodeTypes, + Pool: TransactionPool>, + >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 54ff36dabac0..e9e7e23bc9cd 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -244,6 +244,7 @@ where Storage = OpStorage, Engine = OpEngineTypes, >, + Pool: TransactionPool>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -294,6 +295,7 @@ where Storage = OpStorage, Engine = OpEngineTypes, >, + Pool: TransactionPool>, >, OpEngineValidator: EngineValidator<::Engine>, { diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index ce29b77f09d6..877e80897861 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -19,7 +19,7 @@ //! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::{Header, TransactionSigned}; +//! use reth_primitives::{Header, PooledTransactionsElement, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -55,8 +55,12 @@ //! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool> -//! + Unpin +//! Pool: TransactionPool< +//! Transaction: PoolTransaction< +//! Consensus = TransactionSigned, +//! Pooled = PooledTransactionsElement, +//! >, +//! > + Unpin //! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! 
Events: @@ -98,7 +102,7 @@ //! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::{Header, TransactionSigned}; +//! use reth_primitives::{Header, PooledTransactionsElement, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -141,8 +145,12 @@ //! Header = reth_primitives::Header, //! > + AccountReader //! + ChangeSetReader, -//! Pool: TransactionPool> -//! + Unpin +//! Pool: TransactionPool< +//! Transaction: PoolTransaction< +//! Consensus = TransactionSigned, +//! Pooled = PooledTransactionsElement, +//! >, +//! > + Unpin //! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: @@ -222,7 +230,7 @@ use reth_consensus::FullConsensus; use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::NodePrimitives; +use reth_primitives::{NodePrimitives, PooledTransactionsElement}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, @@ -240,7 +248,7 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; +use reth_transaction_pool::{noop::NoopTransactionPool, PoolTransaction, TransactionPool}; use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; @@ -315,6 +323,7 @@ where 
Receipt = ::Receipt, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, BlockExecutor: BlockExecutorProvider, { @@ -706,6 +715,7 @@ where Receipt = ::Receipt, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, { let Self { @@ -831,6 +841,7 @@ where Block = ::Block, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, Pool: TransactionPool::Transaction>, { @@ -1371,6 +1382,7 @@ where Receipt = ::Receipt, Header = ::BlockHeader, >, + Pool: TransactionPool>, >, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::FullConsensus + Clone + 'static, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 478d1de1c51f..b12e021335ed 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -285,10 +285,14 @@ where #[async_trait::async_trait] impl EthCallBundleApiServer for EthBundle where - Eth: EthTransactions + LoadPendingBlock + Call + 'static, + Eth: EthTransactions< + Pool: TransactionPool>, + > + LoadPendingBlock + + Call + + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { - Self::call_bundle(self, request).await.map_err(Into::into) + self.call_bundle(request).await.map_err(Into::into) } } From 4c39b98b621c53524c6533a9c7b52fc42c25abd6 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:56:35 +0000 Subject: [PATCH 52/70] chore: add `StorageLocation` to `BlockBodyWriter` trait (#13266) --- crates/optimism/node/src/node.rs | 8 +++-- .../src/providers/database/provider.rs | 20 ++++++------ crates/storage/provider/src/traits/block.rs | 31 +++---------------- crates/storage/storage-api/src/chain.rs | 6 +++- crates/storage/storage-api/src/storage.rs | 23 ++++++++++++++ 5 files changed, 47 insertions(+), 41 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index e9e7e23bc9cd..e77b50f1e8f0 100644 --- a/crates/optimism/node/src/node.rs +++ 
b/crates/optimism/node/src/node.rs @@ -36,7 +36,7 @@ use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_primitives::{BlockBody, PooledTransactionsElement, TransactionSigned}; use reth_provider::{ providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, - ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, + ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, StorageLocation, }; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -56,16 +56,18 @@ impl> BlockBodyWriter for &self, provider: &Provider, bodies: Vec<(u64, Option)>, + write_to: StorageLocation, ) -> ProviderResult<()> { - self.0.write_block_bodies(provider, bodies) + self.0.write_block_bodies(provider, bodies, write_to) } fn remove_block_bodies_above( &self, provider: &Provider, block: alloy_primitives::BlockNumber, + remove_from: StorageLocation, ) -> ProviderResult<()> { - self.0.remove_block_bodies_above(provider, block) + self.0.remove_block_bodies_above(provider, block, remove_from) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 05e4ed4c0c00..aa6a167d7153 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2893,12 +2893,12 @@ impl BlockWrite fn append_block_bodies( &self, bodies: Vec<(BlockNumber, Option>)>, - write_transactions_to: StorageLocation, + write_to: StorageLocation, ) -> ProviderResult<()> { let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; // Initialize writer if we will be writing transactions to staticfiles - let mut tx_static_writer = write_transactions_to + let mut tx_static_writer = write_to .static_files() .then(|| { self.static_file_provider.get_writer(from_block, StaticFileSegment::Transactions) @@ -2909,7 +2909,7 @@ impl 
BlockWrite let mut tx_block_cursor = self.tx.cursor_write::()?; // Initialize cursor if we will be writing transactions to database - let mut tx_cursor = write_transactions_to + let mut tx_cursor = write_to .database() .then(|| self.tx.cursor_write::>>()) .transpose()?; @@ -2962,7 +2962,7 @@ impl BlockWrite ); } - self.storage.writer().write_block_bodies(self, bodies)?; + self.storage.writer().write_block_bodies(self, bodies, write_to)?; Ok(()) } @@ -2970,7 +2970,7 @@ impl BlockWrite fn remove_blocks_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()> { let mut canonical_headers_cursor = self.tx.cursor_write::()?; let mut rev_headers = canonical_headers_cursor.walk_back(None)?; @@ -3010,7 +3010,7 @@ impl BlockWrite self.remove::(unwind_tx_from..)?; - self.remove_bodies_above(block, remove_transactions_from)?; + self.remove_bodies_above(block, remove_from)?; Ok(()) } @@ -3018,9 +3018,9 @@ impl BlockWrite fn remove_bodies_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()> { - self.storage.writer().remove_block_bodies_above(self, block)?; + self.storage.writer().remove_block_bodies_above(self, block, remove_from)?; // First transaction to be removed let unwind_tx_from = self @@ -3032,11 +3032,11 @@ impl BlockWrite self.remove::(block + 1..)?; self.remove::(unwind_tx_from..)?; - if remove_transactions_from.database() { + if remove_from.database() { self.remove::>>(unwind_tx_from..)?; } - if remove_transactions_from.static_files() { + if remove_from.static_files() { let static_file_tx_num = self .static_file_provider .get_highest_static_file_tx(StaticFileSegment::Transactions); diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index d12f240e6164..9c5821057fc8 100644 --- a/crates/storage/provider/src/traits/block.rs +++ 
b/crates/storage/provider/src/traits/block.rs @@ -3,33 +3,10 @@ use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::NodePrimitives; use reth_primitives::SealedBlockWithSenders; -use reth_storage_api::NodePrimitivesProvider; +use reth_storage_api::{NodePrimitivesProvider, StorageLocation}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; -/// An enum that represents the storage location for a piece of data. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum StorageLocation { - /// Write only to static files. - StaticFiles, - /// Write only to the database. - Database, - /// Write to both the database and static files. - Both, -} - -impl StorageLocation { - /// Returns true if the storage location includes static files. - pub const fn static_files(&self) -> bool { - matches!(self, Self::StaticFiles | Self::Both) - } - - /// Returns true if the storage location includes the database. - pub const fn database(&self) -> bool { - matches!(self, Self::Database | Self::Both) - } -} - /// `BlockExecution` Writer pub trait BlockExecutionWriter: NodePrimitivesProvider> + BlockWriter + Send + Sync @@ -120,7 +97,7 @@ pub trait BlockWriter: Send + Sync { fn append_block_bodies( &self, bodies: Vec<(BlockNumber, Option<::Body>)>, - write_transactions_to: StorageLocation, + write_to: StorageLocation, ) -> ProviderResult<()>; /// Removes all blocks above the given block number from the database. @@ -129,14 +106,14 @@ pub trait BlockWriter: Send + Sync { fn remove_blocks_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()>; /// Removes all block bodies above the given block number from the database. 
fn remove_bodies_above( &self, block: BlockNumber, - remove_transactions_from: StorageLocation, + remove_from: StorageLocation, ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index 9b9c24c68633..978c4f51b5f4 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -1,4 +1,4 @@ -use crate::DBProvider; +use crate::{DBProvider, StorageLocation}; use alloy_primitives::BlockNumber; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_db::{ @@ -22,6 +22,7 @@ pub trait BlockBodyWriter { &self, provider: &Provider, bodies: Vec<(BlockNumber, Option)>, + write_to: StorageLocation, ) -> ProviderResult<()>; /// Removes all block bodies above the given block number from the database. @@ -29,6 +30,7 @@ pub trait BlockBodyWriter { &self, provider: &Provider, block: BlockNumber, + remove_from: StorageLocation, ) -> ProviderResult<()>; } @@ -87,6 +89,7 @@ where &self, provider: &Provider, bodies: Vec<(u64, Option)>, + _write_to: StorageLocation, ) -> ProviderResult<()> { let mut ommers_cursor = provider.tx_ref().cursor_write::()?; let mut withdrawals_cursor = @@ -116,6 +119,7 @@ where &self, provider: &Provider, block: BlockNumber, + _remove_from: StorageLocation, ) -> ProviderResult<()> { provider.tx_ref().unwind_table_by_num::(block)?; provider.tx_ref().unwind_table_by_num::(block)?; diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index e1443347e4bb..0544f9a74c1c 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -41,3 +41,26 @@ pub trait StorageChangeSetReader: Send + Sync { block_number: BlockNumber, ) -> ProviderResult>; } + +/// An enum that represents the storage location for a piece of data. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum StorageLocation { + /// Write only to static files. + StaticFiles, + /// Write only to the database. + Database, + /// Write to both the database and static files. + Both, +} + +impl StorageLocation { + /// Returns true if the storage location includes static files. + pub const fn static_files(&self) -> bool { + matches!(self, Self::StaticFiles | Self::Both) + } + + /// Returns true if the storage location includes the database. + pub const fn database(&self) -> bool { + matches!(self, Self::Database | Self::Both) + } +} From 88a9bd72d45e20ee8b5be2590aaaacfbd7a0e20c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 10 Dec 2024 19:38:37 +0400 Subject: [PATCH 53/70] feat: make engine block downloaders generic over block (#13273) --- crates/consensus/beacon/src/engine/mod.rs | 21 +++--- crates/engine/local/src/service.rs | 4 +- crates/engine/service/src/service.rs | 16 ++--- crates/engine/tree/src/download.rs | 71 +++++++++++--------- crates/engine/tree/src/engine.rs | 2 +- crates/node/builder/src/launch/engine.rs | 4 +- crates/primitives-traits/src/block/header.rs | 3 +- crates/storage/provider/src/providers/mod.rs | 13 +++- 8 files changed, 74 insertions(+), 60 deletions(-) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index c41f9283db85..bbf10256fe23 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -26,10 +26,11 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{EthPrimitives, Head, SealedBlock, SealedHeader}; +use reth_primitives::{Head, SealedBlock, SealedHeader}; use reth_provider::{ - providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, - 
ChainSpecProvider, ProviderError, StageCheckpointReader, + providers::{ProviderNodeTypes, TreeNodeTypes}, + BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, + StageCheckpointReader, }; use reth_stages_api::{ControlFlow, Pipeline, PipelineTarget, StageId}; use reth_tasks::TaskSpawner; @@ -84,15 +85,9 @@ const MAX_INVALID_HEADERS: u32 = 512u32; pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// Helper trait expressing requirements for node types to be used in engine. -pub trait EngineNodeTypes: - ProviderNodeTypes + NodeTypesWithEngine -{ -} +pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} -impl EngineNodeTypes for T where - T: ProviderNodeTypes + NodeTypesWithEngine -{ -} +impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} /// Represents a pending forkchoice update. /// @@ -232,7 +227,7 @@ where impl BeaconConsensusEngine where - N: EngineNodeTypes, + N: TreeNodeTypes, BT: BlockchainTreeEngine + BlockReader, Header = HeaderTy> + BlockIdReader @@ -1801,7 +1796,7 @@ where /// receiver and forwarding them to the blockchain tree. impl Future for BeaconConsensusEngine where - N: EngineNodeTypes, + N: TreeNodeTypes, Client: EthBlockClient + 'static, BT: BlockchainTreeEngine + BlockReader, Header = HeaderTy> diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 57fdc0c254ea..3c7bc72baed5 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -64,7 +64,7 @@ where /// Constructor for [`LocalEngineService`]. 
#[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc, + consensus: Arc>, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, @@ -122,7 +122,7 @@ impl Stream for LocalEngineService where N: EngineNodeTypes, { - type Item = ChainEvent; + type Item = ChainEvent>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 5dfe4184257b..aeaf364a8cdc 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -16,8 +16,8 @@ pub use reth_engine_tree::{ engine::EngineApiEvent, }; use reth_evm::execute::BlockExecutorProvider; -use reth_network_p2p::EthBlockClient; -use reth_node_types::{BlockTy, NodeTypes, NodeTypesWithEngine}; +use reth_network_p2p::BlockClient; +use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; @@ -42,7 +42,7 @@ type EngineServiceType = ChainOrchestrator< ::Primitives, >, EngineMessageStream<::Engine>, - BasicBlockDownloader, + BasicBlockDownloader>, >, PipelineSync, >; @@ -53,7 +53,7 @@ type EngineServiceType = ChainOrchestrator< pub struct EngineService where N: EngineNodeTypes, - Client: EthBlockClient + 'static, + Client: BlockClient
, Body = BodyTy> + 'static, E: BlockExecutorProvider + 'static, { orchestrator: EngineServiceType, @@ -63,13 +63,13 @@ where impl EngineService where N: EngineNodeTypes, - Client: EthBlockClient + 'static, + Client: BlockClient
, Body = BodyTy> + 'static, E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. #[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc, + consensus: Arc>, executor_factory: E, chain_spec: Arc, client: Client, @@ -131,10 +131,10 @@ where impl Stream for EngineService where N: EngineNodeTypes, - Client: EthBlockClient + 'static, + Client: BlockClient
, Body = BodyTy> + 'static, E: BlockExecutorProvider + 'static, { - type Item = ChainEvent; + type Item = ChainEvent>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut orchestrator = self.project().orchestrator; diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 8a7ea583f0fc..199e5f964061 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -1,14 +1,16 @@ //! Handler that can download blocks on demand (e.g. from the network). use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics}; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::FutureExt; use reth_consensus::Consensus; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, EthBlockClient, + BlockClient, }; -use reth_primitives::{SealedBlock, SealedBlockWithSenders}; +use reth_primitives::{SealedBlockFor, SealedBlockWithSenders}; +use reth_primitives_traits::Block; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque}, @@ -20,11 +22,14 @@ use tracing::trace; /// A trait that can download blocks on demand. pub trait BlockDownloader: Send + Sync { + /// Type of the block being downloaded. + type Block: Block; + /// Handle an action. fn on_action(&mut self, action: DownloadAction); /// Advance in progress requests if any - fn poll(&mut self, cx: &mut Context<'_>) -> Poll; + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; } /// Actions that can be performed by the block downloader. @@ -38,9 +43,9 @@ pub enum DownloadAction { /// Outcome of downloaded blocks. #[derive(Debug)] -pub enum DownloadOutcome { +pub enum DownloadOutcome { /// Downloaded blocks. - Blocks(Vec), + Blocks(Vec>), /// New download started. NewDownloadStarted { /// How many blocks are pending in this download. @@ -52,7 +57,7 @@ pub enum DownloadOutcome { /// Basic [`BlockDownloader`]. 
#[allow(missing_debug_implementations)] -pub struct BasicBlockDownloader +pub struct BasicBlockDownloader where Client: BlockClient + 'static, { @@ -64,16 +69,17 @@ where inflight_block_range_requests: Vec>, /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for /// ordering. This means the blocks will be popped from the heap with ascending block numbers. - set_buffered_blocks: BinaryHeap>, + set_buffered_blocks: BinaryHeap>>, /// Engine download metrics. metrics: BlockDownloaderMetrics, /// Pending events to be emitted. - pending_events: VecDeque, + pending_events: VecDeque>, } -impl BasicBlockDownloader +impl BasicBlockDownloader where - Client: EthBlockClient + 'static, + Client: BlockClient
+ 'static, + B: Block, { /// Create a new instance pub fn new( @@ -174,20 +180,23 @@ where } /// Adds a pending event to the FIFO queue. - fn push_pending_event(&mut self, pending_event: DownloadOutcome) { + fn push_pending_event(&mut self, pending_event: DownloadOutcome) { self.pending_events.push_back(pending_event); } /// Removes a pending event from the FIFO queue. - fn pop_pending_event(&mut self) -> Option { + fn pop_pending_event(&mut self) -> Option> { self.pending_events.pop_front() } } -impl BlockDownloader for BasicBlockDownloader +impl BlockDownloader for BasicBlockDownloader where - Client: EthBlockClient, + Client: BlockClient
, + B: Block, { + type Block = B; + /// Handles incoming download actions. fn on_action(&mut self, action: DownloadAction) { match action { @@ -197,7 +206,7 @@ where } /// Advances the download process. - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { if let Some(pending_event) = self.pop_pending_event() { return Poll::Ready(pending_event); } @@ -244,7 +253,7 @@ where } // drain all unique element of the block buffer if there are any - let mut downloaded_blocks: Vec = + let mut downloaded_blocks: Vec> = Vec::with_capacity(self.set_buffered_blocks.len()); while let Some(block) = self.set_buffered_blocks.pop() { // peek ahead and pop duplicates @@ -264,29 +273,29 @@ where /// A wrapper type around [`SealedBlockWithSenders`] that implements the [Ord] /// trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlockWithSenders(SealedBlockWithSenders); +struct OrderedSealedBlockWithSenders(SealedBlockWithSenders); -impl PartialOrd for OrderedSealedBlockWithSenders { +impl PartialOrd for OrderedSealedBlockWithSenders { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedSealedBlockWithSenders { +impl Ord for OrderedSealedBlockWithSenders { fn cmp(&self, other: &Self) -> Ordering { - self.0.number.cmp(&other.0.number) + self.0.number().cmp(&other.0.number()) } } -impl From for OrderedSealedBlockWithSenders { - fn from(block: SealedBlock) -> Self { +impl From> for OrderedSealedBlockWithSenders { + fn from(block: SealedBlockFor) -> Self { let senders = block.senders().unwrap_or_default(); Self(SealedBlockWithSenders { block, senders }) } } -impl From for SealedBlockWithSenders { - fn from(value: OrderedSealedBlockWithSenders) -> Self { +impl From> for SealedBlockWithSenders { + fn from(value: OrderedSealedBlockWithSenders) -> Self { let senders = value.0.senders; Self { block: value.0.block, senders } } @@ -295,12 +304,14 @@ impl From for 
SealedBlockWithSenders { /// A [`BlockDownloader`] that does nothing. #[derive(Debug, Clone, Default)] #[non_exhaustive] -pub struct NoopBlockDownloader; +pub struct NoopBlockDownloader(core::marker::PhantomData); + +impl BlockDownloader for NoopBlockDownloader { + type Block = B; -impl BlockDownloader for NoopBlockDownloader { fn on_action(&mut self, _event: DownloadAction) {} - fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Pending } } @@ -318,7 +329,7 @@ mod tests { use std::{future::poll_fn, sync::Arc}; struct TestHarness { - block_downloader: BasicBlockDownloader, + block_downloader: BasicBlockDownloader, client: TestFullBlockClient, } @@ -385,7 +396,7 @@ mod tests { // ensure they are in ascending order for num in 1..=TOTAL_BLOCKS { - assert_eq!(blocks[num-1].number, num as u64); + assert_eq!(blocks[num-1].number(), num as u64); } }); } @@ -423,7 +434,7 @@ mod tests { // ensure they are in ascending order for num in 1..=TOTAL_BLOCKS { - assert_eq!(blocks[num-1].number, num as u64); + assert_eq!(blocks[num-1].number(), num as u64); } }); } diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 9fa0a8c1d214..dfc68fb73b39 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -67,7 +67,7 @@ impl EngineHandler { impl ChainHandler for EngineHandler where - T: EngineRequestHandler, + T: EngineRequestHandler, S: Stream + Send + Sync + Unpin + 'static, ::Item: Into, D: BlockDownloader, diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 054def94e50d..9a7e83b66fca 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -27,7 +27,7 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_primitives::EthereumHardforks; +use reth_primitives::{EthPrimitives, EthereumHardforks}; use 
reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; @@ -69,7 +69,7 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: EngineNodeTypes, + Types: EngineNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, AO: RethRpcAddOns> diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs index 42d0153b19c6..e1406df49477 100644 --- a/crates/primitives-traits/src/block/header.rs +++ b/crates/primitives-traits/src/block/header.rs @@ -1,6 +1,6 @@ //! Block header data primitive. -use core::fmt; +use core::{fmt, hash::Hash}; use alloy_primitives::Sealable; @@ -18,6 +18,7 @@ pub trait BlockHeader: + Sync + Unpin + Clone + + Hash + Default + fmt::Debug + PartialEq diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index b4a99541a89a..358468d782c1 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -26,7 +26,8 @@ use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_node_types::{ - BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy, + BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, + ReceiptTy, TxTy, }; use reth_primitives::{ Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, @@ -104,8 +105,14 @@ impl ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB /// A helper trait with requirements for [`NodeTypesForProvider`] to be used within legacy /// blockchain tree. 
-pub trait NodeTypesForTree: NodeTypesForProvider {} -impl NodeTypesForTree for T where T: NodeTypesForProvider {} +pub trait NodeTypesForTree: + NodeTypesForProvider + NodeTypesWithEngine +{ +} +impl NodeTypesForTree for T where + T: NodeTypesForProvider + NodeTypesWithEngine +{ +} /// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain /// tree. From 8aada7a243bea18acbaa1cd50f99462712fd4ba0 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 10 Dec 2024 18:29:07 +0000 Subject: [PATCH 54/70] feat(engine): parallel sparse storage roots (#13269) --- crates/engine/tree/src/tree/root.rs | 56 ++++++++++++++++--------- crates/evm/execution-errors/src/trie.rs | 4 +- crates/trie/sparse/src/state.rs | 21 +++++++++- 3 files changed, 57 insertions(+), 24 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index e2ed6aa14706..a8c455265dd5 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,6 +1,7 @@ //! State root task related functionality. use alloy_primitives::map::{HashMap, HashSet}; +use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_evm::system_calls::OnStateHook; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, @@ -567,30 +568,45 @@ fn update_sparse_trie( trie.reveal_multiproof(targets, multiproof)?; // Update storage slots with new values and calculate storage roots. 
- for (address, storage) in state.storages { - trace!(target: "engine::root::sparse", ?address, "Updating storage"); - let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?; - - if storage.wiped { - trace!(target: "engine::root::sparse", ?address, "Wiping storage"); - storage_trie.wipe(); - } + let (tx, rx) = mpsc::channel(); + state + .storages + .into_iter() + .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) + .par_bridge() + .map(|(address, storage, storage_trie)| { + trace!(target: "engine::root::sparse", ?address, "Updating storage"); + let mut storage_trie = storage_trie.ok_or(SparseTrieError::Blind)?; + + if storage.wiped { + trace!(target: "engine::root::sparse", ?address, "Wiping storage"); + storage_trie.wipe()?; + } - for (slot, value) in storage.storage { - let slot_nibbles = Nibbles::unpack(slot); - if value.is_zero() { - trace!(target: "engine::root::sparse", ?address, ?slot, "Removing storage slot"); + for (slot, value) in storage.storage { + let slot_nibbles = Nibbles::unpack(slot); + if value.is_zero() { + trace!(target: "engine::root::sparse", ?address, ?slot, "Removing storage slot"); - // TODO: handle blinded node error - storage_trie.remove_leaf(&slot_nibbles)?; - } else { - trace!(target: "engine::root::sparse", ?address, ?slot, "Updating storage slot"); - storage_trie - .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; + storage_trie.remove_leaf(&slot_nibbles)?; + } else { + trace!(target: "engine::root::sparse", ?address, ?slot, "Updating storage slot"); + storage_trie + .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; + } } - } - storage_trie.root(); + storage_trie.root(); + + SparseStateTrieResult::Ok((address, storage_trie)) + }) + .for_each_init(|| tx.clone(), |tx, result| { + tx.send(result).unwrap() + }); + drop(tx); + for result in rx { + let (address, storage_trie) = result?; + trie.insert_storage_trie(address, 
storage_trie); } // Update accounts with new values diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 8d04f97e8ea4..ba1bfcc02370 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -107,14 +107,14 @@ pub enum SparseTrieError { /// Path to the node. path: Nibbles, /// Node that was at the path when revealing. - node: Box, + node: Box, }, /// RLP error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), /// Other. #[error(transparent)] - Other(#[from] Box), + Other(#[from] Box), } /// Trie witness errors. diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 6638632f0adc..6f5db4eda7f9 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -97,9 +97,26 @@ impl SparseStateTrie { /// Returns mutable reference to storage sparse trie if it was revealed. pub fn storage_trie_mut( &mut self, - account: &B256, + address: &B256, ) -> Option<&mut RevealedSparseTrie> { - self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) + self.storages.get_mut(address).and_then(|e| e.as_revealed_mut()) + } + + /// Takes the storage trie for the provided address. + pub fn take_storage_trie( + &mut self, + address: &B256, + ) -> Option> { + self.storages.remove(address) + } + + /// Inserts storage trie for the provided address. + pub fn insert_storage_trie( + &mut self, + address: B256, + storage_trie: SparseTrie, + ) { + self.storages.insert(address, storage_trie); } /// Reveal unknown trie paths from provided leaf path and its proof for the account. 
From 62e2cbfe86aba1bf884ab10937426f36c1caaee1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 10 Dec 2024 19:43:42 +0100 Subject: [PATCH 55/70] chore: remove unused evm env provider fns (#13272) --- crates/evm/src/provider.rs | 44 +------------------ .../src/providers/blockchain_provider.rs | 32 ++------------ .../provider/src/providers/consistent.rs | 39 ++-------------- .../provider/src/providers/database/mod.rs | 32 ++------------ .../src/providers/database/provider.rs | 39 ++-------------- crates/storage/provider/src/providers/mod.rs | 32 ++------------ .../storage/provider/src/test_utils/mock.rs | 36 +++------------ .../storage/provider/src/test_utils/noop.rs | 36 +++------------ 8 files changed, 27 insertions(+), 263 deletions(-) diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 6ef4cefbb485..e0256e9a8a47 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -2,15 +2,14 @@ use crate::ConfigureEvmEnv; use alloy_consensus::Header; -use alloy_eips::BlockHashOrNumber; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// A provider type that knows chain specific information required to configure a /// [`CfgEnvWithHandlerCfg`]. /// /// This type is mainly used to provide required data to configure the EVM environment that is -/// usually stored on disk. +/// not part of the block and stored separately (on disk), for example the total difficulty. #[auto_impl::auto_impl(&, Arc)] pub trait EvmEnvProvider: Send + Sync { /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the @@ -20,45 +19,6 @@ pub trait EvmEnvProvider: Send + Sync { header: &H, evm_config: EvmConfig, ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> - where - EvmConfig: ConfigureEvmEnv
, - { - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - let mut block_env = BlockEnv::default(); - self.fill_env_with_header(&mut cfg, &mut block_env, header, evm_config)?; - Ok((cfg, block_env)) - } - - /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given - /// block header. - fn fill_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - header: &H, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
; - - /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given - /// [BlockHashOrNumber]. - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
; - - /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given block header. - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &H, - evm_config: EvmConfig, - ) -> ProviderResult<()> where EvmConfig: ConfigureEvmEnv
; } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 68f1498eccb6..2d624fbc74cb 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -493,41 +493,15 @@ impl StageCheckpointReader for BlockchainProvider2 { } impl EvmEnvProvider> for BlockchainProvider2 { - fn fill_env_with_header( + fn env_with_header( &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, header: &HeaderTy, evm_config: EvmConfig, - ) -> ProviderResult<()> + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
>, { - self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config) - } - - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config) - } - - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &HeaderTy, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) + self.consistent_provider()?.env_with_header(header, evm_config) } } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 927a78fe19e2..4604f29db866 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1231,51 +1231,18 @@ impl StageCheckpointReader for ConsistentProvider { } impl EvmEnvProvider> for ConsistentProvider { - fn fill_env_with_header( + fn env_with_header( &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, header: &HeaderTy, evm_config: EvmConfig, - ) -> ProviderResult<()> + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self .header_td_by_number(header.number())? .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; - evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); - Ok(()) - } - - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_cfg_env_with_header(cfg, &header, evm_config) - } - - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &HeaderTy, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - let total_difficulty = self - .header_td_by_number(header.number())? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; - evm_config.fill_cfg_env(cfg, header, total_difficulty); - Ok(()) + Ok(evm_config.cfg_and_block_env(header, total_difficulty)) } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 85b734ef6616..bb0e49420eae 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -590,41 +590,15 @@ impl StageCheckpointReader for ProviderFactory { } impl EvmEnvProvider> for ProviderFactory { - fn fill_env_with_header( + fn env_with_header( &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, header: &HeaderTy, evm_config: EvmConfig, - ) -> ProviderResult<()> + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
>, { - self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) - } - - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - self.provider()?.fill_cfg_env_at(cfg, at, evm_config) - } - - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &HeaderTy, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - self.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) + self.provider()?.env_with_header(header, evm_config) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index aa6a167d7153..da8fb97cedc6 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1638,51 +1638,18 @@ impl> Withdrawals impl EvmEnvProvider> for DatabaseProvider { - fn fill_env_with_header( + fn env_with_header( &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, header: &HeaderTy, evm_config: EvmConfig, - ) -> ProviderResult<()> + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
>, { let total_difficulty = self .header_td_by_number(header.number())? .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; - evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); - Ok(()) - } - - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_cfg_env_with_header(cfg, &header, evm_config) - } - - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &HeaderTy, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
>, - { - let total_difficulty = self - .header_td_by_number(header.number())? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; - evm_config.fill_cfg_env(cfg, header, total_difficulty); - Ok(()) + Ok(evm_config.cfg_and_block_env(header, total_difficulty)) } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 358468d782c1..d02c59278666 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -598,41 +598,15 @@ impl StageCheckpointReader for BlockchainProvider { } impl EvmEnvProvider for BlockchainProvider { - fn fill_env_with_header( + fn env_with_header( &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, header: &Header, evm_config: EvmConfig, - ) -> ProviderResult<()> + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
, { - self.database.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) - } - - fn fill_cfg_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - self.database.provider()?.fill_cfg_env_at(cfg, at, evm_config) - } - - fn fill_cfg_env_with_header( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - self.database.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) + self.database.provider()?.env_with_header(header, evm_config) } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index abe1096a1bc7..2aa70a47b239 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -717,41 +717,15 @@ impl StateProvider for MockEthProvider { } impl EvmEnvProvider for MockEthProvider { - fn fill_env_with_header( + fn env_with_header( &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _block_env: &mut BlockEnv, - _header: &Header, - _evm_config: EvmConfig, - ) -> ProviderResult<()> + header: &Header, + evm_config: EvmConfig, + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
, { - Ok(()) - } - - fn fill_cfg_env_at( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _at: BlockHashOrNumber, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) - } - - fn fill_cfg_env_with_header( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _header: &Header, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) + Ok(evm_config.cfg_and_block_env(header, U256::MAX)) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 3846313b9f4f..b72df25af289 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -436,41 +436,15 @@ impl StateProvider for NoopProvider { } impl EvmEnvProvider for NoopProvider { - fn fill_env_with_header( + fn env_with_header( &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _block_env: &mut BlockEnv, - _header: &Header, - _evm_config: EvmConfig, - ) -> ProviderResult<()> + header: &Header, + evm_config: EvmConfig, + ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where EvmConfig: ConfigureEvmEnv
, { - Ok(()) - } - - fn fill_cfg_env_at( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _at: BlockHashOrNumber, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) - } - - fn fill_cfg_env_with_header( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _header: &Header, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) + Ok(evm_config.cfg_and_block_env(header, U256::MAX)) } } From 73f1583455ace8e9012271e40bd4fe1b42d87715 Mon Sep 17 00:00:00 2001 From: morito Date: Wed, 11 Dec 2024 05:06:21 +0900 Subject: [PATCH 56/70] Bump alloy to 0.8.0 (#13268) Co-authored-by: Matthias Seitz --- Cargo.lock | 190 +++++++++--------- Cargo.toml | 78 +++---- .../beacon/src/engine/invalid_headers.rs | 2 +- crates/net/eth-wire-types/src/broadcast.rs | 2 +- .../primitives/src/transaction/mod.rs | 12 +- .../primitives/src/transaction/signed.rs | 13 +- .../primitives-traits/src/transaction/mod.rs | 34 ---- crates/primitives/src/alloy_compat.rs | 2 +- crates/primitives/src/block.rs | 2 +- crates/primitives/src/transaction/mod.rs | 40 ++-- crates/primitives/src/transaction/pooled.rs | 26 +-- crates/rpc/rpc-server-types/src/result.rs | 1 - crates/rpc/rpc/src/otterscan.rs | 4 +- crates/storage/codecs/derive/src/arbitrary.rs | 2 +- 14 files changed, 192 insertions(+), 216 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92b089a0eafd..d7540dc437c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a101d4d016f47f13890a74290fdd17b05dd175191d9337bc600791fb96e4dea8" +checksum = "8ba14856660f31807ebb26ce8f667e814c72694e1077e97ef102e326ad580f3f" dependencies = [ "alloy-eips", "alloy-primitives", @@ -141,9 +141,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa60357dda9a3d0f738f18844bd6d0f4a5924cc5cf00bfad2ff1369897966123" +checksum = "28666307e76441e7af37a2b90cde7391c28112121bea59f4e0d804df8b20057e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -155,9 +155,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.7.3" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2869e4fb31331d3b8c58c7db567d1e4e4e94ef64640beda3b6dd9b7045690941" +checksum = "f3510769905590b8991a8e63a5e0ab4aa72cf07a13ab5fbe23f12f4454d161da" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80759b3f57b3b20fa7cd8fef6479930fc95461b58ff8adea6e87e618449c8a1d" +checksum = "41056bde53ae10ffbbf11618efbe1e0290859e5eab0fe9ef82ebdb62f12a866f" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -222,9 +222,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6755b093afef5925f25079dd5a7c8d096398b804ba60cb5275397b06b31689" +checksum = "47e922d558006ba371681d484d12aa73fe673d84884f83747730af7433c0e86d" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -243,9 +243,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeec8e6eab6e52b7c9f918748c9b811e87dbef7312a2e3a2ca1729a92966a6af" +checksum = "5dca170827a7ca156b43588faebf9e9d27c27d0fb07cab82cfd830345e2b24f5" dependencies = [ "alloy-primitives", "alloy-serde", @@ -255,9 +255,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac4b22b3e51cac09fd2adfcc73b55f447b4df669f983c13f7894ec82b607c63f" +checksum = "c357da577dfb56998d01f574d81ad7a1958d248740a7981b205d69d65a7da404" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -267,9 +267,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4fa077efe0b834bcd89ff4ba547f48fb081e4fdc3673dd7da1b295a2cf2bb7b7" +checksum = "9335278f50b0273e0a187680ee742bb6b154a948adf036f448575bacc5ccb315" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -281,9 +281,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209a1882a08e21aca4aac6e2a674dc6fcf614058ef8cb02947d63782b1899552" +checksum = "ad4e6ad4230df8c4a254c20f8d6a84ab9df151bfca13f463177dbc96571cc1f8" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -306,9 +306,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20219d1ad261da7a6331c16367214ee7ded41d001fabbbd656fbf71898b2773" +checksum = "c4df88a2f8020801e0fefce79471d3946d39ca3311802dbbd0ecfdeee5e972e3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -319,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffcf33dd319f21cd6f066d81cbdef0326d4bdaaf7cfe91110bc090707858e9f" +checksum = "2db5cefbc736b2b26a960dcf82279c70a03695dd11a0032a6dc27601eeb29182" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -336,9 +336,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9db948902dfbae96a73c2fbf1f7abec62af034ab883e4c777c3fd29702bd6e2c" +checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" dependencies = [ "alloy-rlp", "arbitrary", @@ -368,9 +368,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9eefa6f4c798ad01f9b4202d02cea75f5ec11fa180502f4701e2b47965a8c0bb" +checksum = "5115c74c037714e1b02a86f742289113afa5d494b5ea58308ba8aa378e739101" dependencies = [ "alloy-chains", "alloy-consensus", @@ -409,9 +409,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aac9a7210e0812b1d814118f426f57eb7fc260a419224dd1c76d169879c06907" +checksum = "b073afa409698d1b9a30522565815f3bf7010e5b47b997cf399209e6110df097" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -450,9 +450,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed30bf1041e84cabc5900f52978ca345dd9969f2194a945e6fdec25b0620705c" +checksum = "5c6a0bd0ce5660ac48e4f3bb0c7c5c3a94db287a0be94971599d83928476cbcd" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -475,9 +475,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab686b0fa475d2a4f5916c5f07797734a691ec58e44f0f55d4746ea39cbcefb" +checksum = "374ac12e35bb90ebccd86e7c943ddba9590149a6e35cc4d9cd860d6635fd1018" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -488,9 +488,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f0874a976ccdf83a178ad93b64bec5b8c91a47428d714d544ca70258acfa07b" +checksum = "934b3865d0f9695dcc396e853e2197171f443cc46b7d3390c1e53a4d0198232b" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -500,9 +500,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d33bc190844626c08e21897736dbd7956ab323c09e6f141b118d1c8b7aff689e" +checksum = "f0b85a5f5f5d99047544f4ec31330ee15121dcb8ef5af3e791a5207e6b92b05b" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -512,9 +512,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200661999b6e235d9840be5d60a6e8ae2f0af9eb2a256dd378786744660e36ec" +checksum = "ea98f81bcd759dbfa3601565f9d7a02220d8ef1d294ec955948b90aaafbfd857" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -523,9 +523,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc37861dc8cbf5da35d346139fbe6e03ee7823cc21138a2c4a590d3b0b4b24be" +checksum = "6e13e94be8f6f5cb735e604f9db436430bf3773fdd41db7221edaa58c07c4c8a" dependencies = [ "alloy-eips", "alloy-primitives", @@ -538,9 +538,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0294b553785eb3fa7fff2e8aec45e82817258e7e6c9365c034a90cb6baeebc9" +checksum = "4fd14f68a482e67dfba52d404dfff1d3b0d9fc3b4775bd0923f3175d7661c3bd" dependencies = [ "alloy-primitives", "serde", @@ -548,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d297268357e3eae834ddd6888b15f764cbc0f4b3be9265f5f6ec239013f3d68" +checksum = "9ca5898f753ff0d15a0dc955c169523d8fee57e05bb5a38a398b3451b0b988be" dependencies = [ "alloy-consensus", "alloy-eips", @@ -569,9 +569,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a0600b8b5e2dc0cab12cbf91b5a885c35871789fb7b3a57b434bd4fced5b7a8b" +checksum = "0e518b0a7771e00728f18be0708f828b18a1cfc542a7153bef630966a26388e0" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -591,9 +591,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093d618d5a42808e7ae26062f415a1e816fc27d3d32662c6ed52d0871b154894" +checksum = "e58dc4ff16cda220e28e24287024f68e48d5c205b3804b13adad3f79debf4cb8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -605,9 +605,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e073ab0e67429c60be281e181731132fd07d82e091c10c29ace6935101034bb" +checksum = "cdff93fa38be6982f8613a060e18fa0a37ce440d69ed3b7f37c6c69036ce1c53" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -619,9 +619,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7435f6bfb93912f16d64bb61f4278fa698469e054784f477337ef87ec0b2527b" +checksum = "2d9dc647985db41fd164e807577134da1179b9f5ba0959f8698d6587eaa568f5" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -631,9 +631,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9afa753a97002a33b2ccb707d9f15f31c81b8c1b786c95b73cc62bb1d1fd0c3f" +checksum = "ed3dc8d4a08ffc90c1381d39a4afa2227668259a42c97ab6eecf51cbd82a8761" dependencies = [ "alloy-primitives", "arbitrary", @@ -643,9 +643,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b2cbff01a673936c2efd7e00d4c0e9a4dbbd6d600e2ce298078d33efbb19cd7" +checksum = "16188684100f6e0f2a2b949968fe3007749c5be431549064a1bce4e7b3a196a9" dependencies = [ "alloy-primitives", "async-trait", @@ -657,9 +657,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6d988cb6cd7d2f428a74476515b1a6e901e08c796767f9f93311ab74005c8b" +checksum = "e2184dab8c9493ab3e1c9f6bd3bdb563ed322b79023d81531935e84a4fdf7cf1" dependencies = [ "alloy-consensus", "alloy-network", @@ -675,9 +675,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bfd7853b65a2b4f49629ec975fee274faf6dff15ab8894c620943398ef283c0" +checksum = "d9d64f851d95619233f74b310f12bcf16e0cbc27ee3762b6115c14a84809280a" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -689,9 +689,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ec42f342d9a9261699f8078e57a7a4fda8aaa73c1a212ed3987080e6a9cd13" +checksum = "6bf7ed1574b699f48bf17caab4e6e54c6d12bc3c006ab33d58b1e227c1c3559f" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -707,9 +707,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2c50e6a62ee2b4f7ab3c6d0366e5770a21cad426e109c2f40335a1b3aff3df" +checksum = "8c02997ccef5f34f9c099277d4145f183b422938ed5322dc57a089fe9b9ad9ee" dependencies = [ "const-hex", "dunce", @@ -722,9 +722,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac17c6e89a50fb4a758012e4b409d9a0ba575228e69b539fe37d7a1bd507ca4a" +checksum = "ce13ff37285b0870d0a0746992a4ae48efaf34b766ae4c2640fa15e5305f8e73" dependencies = [ "serde", "winnow", @@ -732,9 +732,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9dc0fffe397aa17628160e16b89f704098bf3c9d74d5d369ebc239575936de5" +checksum = "1174cafd6c6d810711b4e00383037bdb458efc4fe3dbafafa16567e0320c54d8" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -745,9 +745,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d69d36982b9e46075ae6b792b0f84208c6c2c15ad49f6c500304616ef67b70e0" +checksum = "628be5b9b75e4f4c4f2a71d985bbaca4f23de356dc83f1625454c505f5eef4df" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -765,9 +765,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e02ffd5d93ffc51d72786e607c97de3b60736ca3e636ead0ec1f7dce68ea3fd" +checksum = "4e24412cf72f79c95cd9b1d9482e3a31f9d94c24b43c4b3b710cc8d4341eaab0" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -780,9 +780,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6f8b87cb84bae6d81ae6604b37741c8116f84f9784a0ecc6038c302e679d23" +checksum = "0577a1f67ce70ece3f2b27cf1011da7222ef0a5701f7dcb558e5356278eeb531" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -799,9 +799,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9c085c4e1e7680b723ffc558f61a22c061ed3f70eb3436f93f3936779c59cec1" +checksum = "1ca46272d17f9647fdb56080ed26c72b3ea5078416831130f5ed46f3b4be0ed6" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -5408,9 +5408,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78f0daa0d0936d436a21b57571b1e27c5663aa2ab62f6edae5ba5be999f9f93e" +checksum = "f9d95d0ec6457ad4d3d7fc0ad41db490b219587ed837ada87a26b28e535db15f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5426,9 +5426,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb0964932faa7050b74689f017aca66ffa3e52501080278a81bb0a43836c8dd" +checksum = "a8692a934265dd0fc68f02e2a1d644a80b76ae07dbc59552aa51d6df06953e9d" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5441,9 +5441,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9a690fcc404e44c3589dd39cf22895df42f7ef8671a07828b8c376c39be46a" +checksum = "f973f9e396dc53138ef89501875991bb1728ec34bbd9c0e1ab30caa5518abfa3" dependencies = [ "alloy-consensus", "alloy-network", @@ -5456,9 +5456,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d8c057c1a5bdf72d1f86c470a4d90f2d2ad1b273caa547c04cd6affe45b466d" +checksum = "29cb147e6f39d34bd8284b1107bcbca2e4c14e95b5bc49e5498ca6c0068a94c2" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5480,9 +5480,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.7.3" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a98debc5266443e64e03195cd1a3b6cdbe8d8679e9d8c4b76a3670d24b2e267a" +checksum = "1d7e394ccd907c63acc8213c24ebfb1be24f184b75af89cbaa92178b11de34a8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5493,9 +5493,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73741855ffaa2041b33cb616d7db7180c1149b648c68c23bee9e15501073fb32" +checksum = "eba1b44e2035ec04cc61762cb9b5457d0ecd29d9af631e1a1c107ef571ce2318" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5512,9 +5512,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebedc32e24013c8b3faea62d091bccbb90f871286fe2238c6f7e2ff29974df8e" +checksum = "00bcf8a51980231bbcd250a686c9ef41501c9e7aaa348ffe5808aa61dfe14151" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9674,9 +9674,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" +checksum = "8d056aaa21f36038ab35fe8ce940ee332903a0b4b992b8ca805fb60c85eb2086" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9688,7 +9688,7 @@ dependencies = [ "colorchoice", "revm", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.5", ] [[package]] @@ -10745,9 +10745,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0523f59468a2696391f2a772edc089342aacd53c3caa2ac3264e598edf119b" +checksum = "219389c1ebe89f8333df8bdfb871f6631c552ff399c23cac02480b6088aad8f0" dependencies = [ "paste", "proc-macro2", diff --git a/Cargo.toml 
b/Cargo.toml index ab2fba7b99e0..285f0fc6e379 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -343,7 +343,7 @@ reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-ethereum-forks = { path = "crates/ethereum-forks", default-features = false } reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } -reth-ethereum-primitives = { path = "crates/ethereum/primitives", default-features = false } +reth-ethereum-primitives = { path = "crates/ethereum/primitives", default-features = false } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } @@ -427,58 +427,58 @@ reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-feat # revm revm = { version = "18.0.0", features = ["std"], default-features = false } -revm-inspectors = "0.12.0" +revm-inspectors = "0.13.0" revm-primitives = { version = "14.0.0", default-features = false } # eth alloy-chains = { version = "0.1.32", default-features = false } -alloy-dyn-abi = "0.8.11" -alloy-primitives = { version = "0.8.11", default-features = false } +alloy-dyn-abi = "0.8.15" +alloy-primitives = { version = "0.8.15", default-features = false } alloy-rlp = { version = "0.3.10", default-features = false } -alloy-sol-types = "0.8.11" +alloy-sol-types = "0.8.15" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.7.3", default-features = false } -alloy-contract = { version = "0.7.3", default-features = false } -alloy-eips = { version = "0.7.3", default-features = false } -alloy-genesis = { version = "0.7.3", default-features = false } -alloy-json-rpc = { version = "0.7.3", default-features = false } -alloy-network = { version = "0.7.3", default-features = false } -alloy-network-primitives = { version = "0.7.3", default-features = false } -alloy-node-bindings = { version = "0.7.3", default-features = false } 
-alloy-provider = { version = "0.7.3", features = [ +alloy-consensus = { version = "0.8.0", default-features = false } +alloy-contract = { version = "0.8.0", default-features = false } +alloy-eips = { version = "0.8.0", default-features = false } +alloy-genesis = { version = "0.8.0", default-features = false } +alloy-json-rpc = { version = "0.8.0", default-features = false } +alloy-network = { version = "0.8.0", default-features = false } +alloy-network-primitives = { version = "0.8.0", default-features = false } +alloy-node-bindings = { version = "0.8.0", default-features = false } +alloy-provider = { version = "0.8.0", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.7.3", default-features = false } -alloy-rpc-client = { version = "0.7.3", default-features = false } -alloy-rpc-types = { version = "0.7.3", features = [ +alloy-pubsub = { version = "0.8.0", default-features = false } +alloy-rpc-client = { version = "0.8.0", default-features = false } +alloy-rpc-types = { version = "0.8.0", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.7.3", default-features = false } -alloy-rpc-types-anvil = { version = "0.7.3", default-features = false } -alloy-rpc-types-beacon = { version = "0.7.3", default-features = false } -alloy-rpc-types-debug = { version = "0.7.3", default-features = false } -alloy-rpc-types-engine = { version = "0.7.3", default-features = false } -alloy-rpc-types-eth = { version = "0.7.3", default-features = false } -alloy-rpc-types-mev = { version = "0.7.3", default-features = false } -alloy-rpc-types-trace = { version = "0.7.3", default-features = false } -alloy-rpc-types-txpool = { version = "0.7.3", default-features = false } -alloy-serde = { version = "0.7.3", default-features = false } -alloy-signer = { version = "0.7.3", default-features = false } -alloy-signer-local = { version = "0.7.3", default-features = false } -alloy-transport = { version = "0.7.3" } 
-alloy-transport-http = { version = "0.7.3", features = [ +alloy-rpc-types-admin = { version = "0.8.0", default-features = false } +alloy-rpc-types-anvil = { version = "0.8.0", default-features = false } +alloy-rpc-types-beacon = { version = "0.8.0", default-features = false } +alloy-rpc-types-debug = { version = "0.8.0", default-features = false } +alloy-rpc-types-engine = { version = "0.8.0", default-features = false } +alloy-rpc-types-eth = { version = "0.8.0", default-features = false } +alloy-rpc-types-mev = { version = "0.8.0", default-features = false } +alloy-rpc-types-trace = { version = "0.8.0", default-features = false } +alloy-rpc-types-txpool = { version = "0.8.0", default-features = false } +alloy-serde = { version = "0.8.0", default-features = false } +alloy-signer = { version = "0.8.0", default-features = false } +alloy-signer-local = { version = "0.8.0", default-features = false } +alloy-transport = { version = "0.8.0" } +alloy-transport-http = { version = "0.8.0", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.7.3", default-features = false } -alloy-transport-ws = { version = "0.7.3", default-features = false } +alloy-transport-ipc = { version = "0.8.0", default-features = false } +alloy-transport-ws = { version = "0.8.0", default-features = false } # op -op-alloy-rpc-types = "0.7.3" -op-alloy-rpc-types-engine = "0.7.3" -op-alloy-rpc-jsonrpsee = "0.7.3" -op-alloy-network = "0.7.3" -op-alloy-consensus = "0.7.3" +op-alloy-rpc-types = "0.8.0" +op-alloy-rpc-types-engine = "0.8.0" +op-alloy-rpc-jsonrpsee = "0.8.0" +op-alloy-network = "0.8.0" +op-alloy-consensus = "0.8.0" # misc aquamarine = "0.6" @@ -495,7 +495,7 @@ cfg-if = "1.0" clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "6.0" -derive_more = { version = "1", default-features = false, features = ["full"] } +derive_more = { version = "1", default-features = false, features = ["full"] } dyn-clone = "1.0.17" 
eyre = "0.6" fdlimit = "0.3.0" diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 2e2bc37a27ee..384820ca9f3f 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -42,7 +42,7 @@ impl InvalidHeaderCache { let entry = self.headers.get(hash)?; entry.hit_count += 1; if entry.hit_count < INVALID_HEADER_HIT_EVICTION_THRESHOLD { - return Some(entry.header.clone()) + return Some(entry.header) } } // if we get here, the entry has been hit too many times, so we evict it diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 72a1116c3925..e6ea1a3a3759 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -8,7 +8,7 @@ use alloy_rlp::{ use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; use reth_primitives::TransactionSigned; -use reth_primitives_traits::{SignedTransaction, Transaction}; +use reth_primitives_traits::SignedTransaction; use std::{ collections::{HashMap, HashSet}, mem, diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 86ac822c744c..214814dd84c1 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -7,7 +7,7 @@ use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256}; #[cfg(any(test, feature = "reth-codec"))] use alloy_consensus::constants::EIP7702_TX_TYPE_ID; -use alloy_consensus::{SignableTransaction, TxLegacy}; +use alloy_consensus::{SignableTransaction, TxLegacy, Typed2718}; use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; use derive_more::{Constructor, Deref, From}; use op_alloy_consensus::OpTypedTransaction; @@ -157,10 +157,6 @@ impl alloy_consensus::Transaction for 
OpTransaction { self.0.input() } - fn ty(&self) -> u8 { - self.0.ty() - } - fn access_list(&self) -> Option<&AccessList> { self.0.access_list() } @@ -197,3 +193,9 @@ impl InMemorySize for OpTransaction { } } } + +impl Typed2718 for OpTransaction { + fn ty(&self) -> u8 { + self.0.ty() + } +} diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 26581214e67b..ee72c65eb5ec 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -4,6 +4,7 @@ use crate::{OpTransaction, OpTxType}; use alloc::vec::Vec; use alloy_consensus::{ transaction::RlpEcdsaTx, SignableTransaction, Transaction, TxEip1559, TxEip2930, TxEip7702, + Typed2718, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, @@ -228,7 +229,7 @@ impl alloy_rlp::Encodable for OpTransactionSigned { fn length(&self) -> usize { let mut payload_length = self.encode_2718_len(); - if !self.is_legacy() { + if !Encodable2718::is_legacy(self) { payload_length += Header { list: false, payload_length }.length(); } @@ -377,10 +378,6 @@ impl Transaction for OpTransactionSigned { self.deref().input() } - fn ty(&self) -> u8 { - self.deref().ty() - } - fn access_list(&self) -> Option<&AccessList> { self.deref().access_list() } @@ -406,6 +403,12 @@ impl Transaction for OpTransactionSigned { } } +impl Typed2718 for OpTransactionSigned { + fn ty(&self) -> u8 { + self.deref().ty() + } +} + impl Default for OpTransactionSigned { fn default() -> Self { Self { diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 3a0871c99a43..b67e51024bf6 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -7,11 +7,6 @@ pub mod tx_type; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; -use 
alloy_consensus::constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, -}; - /// Helper trait that unifies all behaviour required by transaction to support full node operations. pub trait FullTransaction: Transaction + MaybeCompact {} @@ -32,35 +27,6 @@ pub trait Transaction: + MaybeSerde + MaybeArbitrary { - /// Returns true if the transaction is a legacy transaction. - #[inline] - fn is_legacy(&self) -> bool { - self.ty() == LEGACY_TX_TYPE_ID - } - - /// Returns true if the transaction is an EIP-2930 transaction. - #[inline] - fn is_eip2930(&self) -> bool { - self.ty() == EIP2930_TX_TYPE_ID - } - - /// Returns true if the transaction is an EIP-1559 transaction. - #[inline] - fn is_eip1559(&self) -> bool { - self.ty() == EIP1559_TX_TYPE_ID - } - - /// Returns true if the transaction is an EIP-4844 transaction. - #[inline] - fn is_eip4844(&self) -> bool { - self.ty() == EIP4844_TX_TYPE_ID - } - - /// Returns true if the transaction is an EIP-7702 transaction. 
- #[inline] - fn is_eip7702(&self) -> bool { - self.ty() == EIP7702_TX_TYPE_ID - } } impl Transaction for T where diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 06451c30b9e5..e8f6b53c92e0 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -127,7 +127,7 @@ impl TryFrom for TransactionSigned { } #[cfg(feature = "optimism")] AnyTxEnvelope::Unknown(alloy_network::UnknownTxEnvelope { hash, inner }) => { - use alloy_consensus::Transaction as _; + use alloy_consensus::{Transaction as _, Typed2718}; if inner.ty() == crate::TxType::Deposit { let fields: op_alloy_rpc_types::OpTransactionFields = inner diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 0ee6f860b58a..767ba30286c7 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -3,7 +3,7 @@ use crate::{ RecoveredTx, SealedHeader, TransactionSigned, }; use alloc::vec::Vec; -use alloy_consensus::Header; +use alloy_consensus::{Header, Typed2718}; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b64cf094042e..4732ba8024c3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use alloy_consensus::{ transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, - TxEip4844, TxEip4844Variant, TxEip7702, TxLegacy, TypedTransaction, + TxEip4844, TxEip4844Variant, TxEip7702, TxLegacy, Typed2718, TypedTransaction, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, @@ -192,6 +192,20 @@ impl<'a> arbitrary::Arbitrary<'a> for Transaction { } } +impl Typed2718 for Transaction { + fn ty(&self) -> u8 { + 
match self { + Self::Legacy(tx) => tx.ty(), + Self::Eip2930(tx) => tx.ty(), + Self::Eip1559(tx) => tx.ty(), + Self::Eip4844(tx) => tx.ty(), + Self::Eip7702(tx) => tx.ty(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.ty(), + } + } +} + // === impl Transaction === impl Transaction { @@ -710,18 +724,6 @@ impl alloy_consensus::Transaction for Transaction { } } - fn ty(&self) -> u8 { - match self { - Self::Legacy(tx) => tx.ty(), - Self::Eip2930(tx) => tx.ty(), - Self::Eip1559(tx) => tx.ty(), - Self::Eip4844(tx) => tx.ty(), - Self::Eip7702(tx) => tx.ty(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.ty(), - } - } - fn access_list(&self) -> Option<&AccessList> { match self { Self::Legacy(tx) => tx.access_list(), @@ -826,6 +828,12 @@ impl PartialEq for TransactionSigned { } } +impl Typed2718 for TransactionSigned { + fn ty(&self) -> u8 { + self.deref().ty() + } +} + // === impl TransactionSigned === impl TransactionSigned { @@ -1202,10 +1210,6 @@ impl alloy_consensus::Transaction for TransactionSigned { self.deref().input() } - fn ty(&self) -> u8 { - self.deref().ty() - } - fn access_list(&self) -> Option<&AccessList> { self.deref().access_list() } @@ -1240,7 +1244,7 @@ impl Encodable for TransactionSigned { fn length(&self) -> usize { let mut payload_length = self.encode_2718_len(); - if !self.is_legacy() { + if !Encodable2718::is_legacy(self) { payload_length += Header { list: false, payload_length }.length(); } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index eea10d44c9f8..b4790d028290 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -10,7 +10,7 @@ use alloc::vec::Vec; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, Signed, TxEip4844WithSidecar, + SignableTransaction, Signed, TxEip4844WithSidecar, Typed2718, }; use alloy_eips::{ 
eip2718::{Decodable2718, Eip2718Result, Encodable2718}, @@ -230,7 +230,7 @@ impl Encodable for PooledTransactionsElement { fn length(&self) -> usize { let mut payload_length = self.encode_2718_len(); - if !self.is_legacy() { + if !Encodable2718::is_legacy(self) { payload_length += Header { list: false, payload_length }.length(); } @@ -383,6 +383,18 @@ impl Decodable2718 for PooledTransactionsElement { } } +impl Typed2718 for PooledTransactionsElement { + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::BlobTransaction(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + } + } +} + impl alloy_consensus::Transaction for PooledTransactionsElement { fn chain_id(&self) -> Option { match self { @@ -524,16 +536,6 @@ impl alloy_consensus::Transaction for PooledTransactionsElement { } } - fn ty(&self) -> u8 { - match self { - Self::Legacy(tx) => tx.tx().ty(), - Self::Eip2930(tx) => tx.tx().ty(), - Self::Eip1559(tx) => tx.tx().ty(), - Self::Eip7702(tx) => tx.tx().ty(), - Self::BlobTransaction(tx) => tx.tx().ty(), - } - } - fn access_list(&self) -> Option<&AccessList> { match self { Self::Legacy(tx) => tx.tx().access_list(), diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 5d1b702e9fca..769f1cd9c02e 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -162,7 +162,6 @@ pub fn block_id_to_str(id: BlockId) -> String { format!("hash {}", h.block_hash) } } - BlockId::Number(n) if n.is_number() => format!("number {n}"), BlockId::Number(n) => format!("{n}"), } } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 173a2ff34956..42d8c06e997d 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{BlockHeader, Transaction}; +use alloy_consensus::{BlockHeader, Transaction, 
Typed2718}; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; @@ -252,7 +252,7 @@ where let timestamp = Some(block.header.timestamp()); let receipts = receipts .drain(page_start..page_end) - .zip(transactions.iter().map(Transaction::ty)) + .zip(transactions.iter().map(Typed2718::ty)) .map(|(receipt, tx_ty)| { let inner = OtsReceipt { status: receipt.status(), diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 753bb1e33a51..91c78923c224 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -67,7 +67,7 @@ pub fn maybe_generate_tests( use rand::RngCore; // get random instance of type - let mut raw = [0u8; 1024]; + let mut raw = vec![0u8; 1024]; rand::thread_rng().fill_bytes(&mut raw); let mut unstructured = arbitrary::Unstructured::new(&raw[..]); let val: Result = arbitrary::Arbitrary::arbitrary(&mut unstructured); From 37f3933db2d8a5a909323cf8eca58646d0f1e04b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 10 Dec 2024 15:56:32 -0500 Subject: [PATCH 57/70] feat: add NetworkPrimitives to NetworkBuilder (#13169) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 1 - crates/e2e-test-utils/src/network.rs | 6 +- crates/ethereum/consensus/src/lib.rs | 120 ++++++++++-------- crates/ethereum/consensus/src/validation.rs | 25 ++-- crates/ethereum/node/src/node.rs | 4 +- crates/net/downloaders/src/bodies/bodies.rs | 5 +- crates/net/downloaders/src/bodies/queue.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 2 +- crates/net/network-api/src/events.rs | 7 +- crates/net/network-api/src/lib.rs | 5 +- crates/net/network/src/network.rs | 6 +- crates/net/p2p/src/bodies/client.rs | 6 +- crates/net/p2p/src/bodies/downloader.rs | 2 +- crates/net/p2p/src/headers/client.rs | 3 +- 
crates/node/builder/Cargo.toml | 1 - crates/node/builder/src/builder/mod.rs | 32 +++-- crates/node/builder/src/components/builder.rs | 37 +++++- crates/node/builder/src/components/mod.rs | 23 ++-- crates/node/builder/src/components/network.rs | 16 ++- crates/node/builder/src/launch/common.rs | 3 +- crates/node/builder/src/setup.rs | 4 +- crates/optimism/node/src/node.rs | 4 +- crates/stages/stages/src/sets.rs | 6 +- crates/stages/stages/src/stages/execution.rs | 9 +- crates/stages/stages/src/stages/merkle.rs | 10 +- .../stages/src/stages/sender_recovery.rs | 5 +- 26 files changed, 201 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7540dc437c8..27a33b69f853 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8118,7 +8118,6 @@ dependencies = [ name = "reth-node-builder" version = "1.1.3" dependencies = [ - "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "aquamarine", diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index ce9d0b94612b..8d8ea68aa93a 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,7 +1,7 @@ use futures_util::StreamExt; use reth_network_api::{ events::PeerEvent, test_utils::PeersHandleProvider, NetworkEvent, NetworkEventListenerProvider, - PeersInfo, + PeerRequest, PeersInfo, }; use reth_network_peers::{NodeRecord, PeerId}; use reth_tokio_util::EventStream; @@ -9,8 +9,8 @@ use reth_tracing::tracing::info; /// Helper for network operations #[derive(Debug)] -pub struct NetworkTestContext { - network_events: EventStream, +pub struct NetworkTestContext { + network_events: EventStream>>, network: Network, } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 4d3ba6282694..2eef9188671f 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,7 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, 
doc_auto_cfg))] -use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::merge::ALLOWED_FUTURE_BLOCK_TIME_SECONDS; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{ @@ -20,10 +21,8 @@ use reth_consensus_common::validation::{ validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, validate_header_base_fee, validate_header_extradata, validate_header_gas, }; -use reth_primitives::{ - Block, BlockBody, BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedHeader, -}; -use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; +use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedHeader}; +use reth_primitives_traits::{constants::MINIMUM_GAS_LIMIT, BlockBody}; use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. @@ -51,43 +50,46 @@ impl EthBeaconConsensus /// /// The maximum allowable difference between self and parent gas limits is determined by the /// parent's gas limit divided by the [`GAS_LIMIT_BOUND_DIVISOR`]. - fn validate_against_parent_gas_limit( + fn validate_against_parent_gas_limit( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Determine the parent gas limit, considering elasticity multiplier on the London fork. 
let parent_gas_limit = - if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { - parent.gas_limit * + if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) + { + parent.gas_limit() * self.chain_spec - .base_fee_params_at_timestamp(header.timestamp) + .base_fee_params_at_timestamp(header.timestamp()) .elasticity_multiplier as u64 } else { - parent.gas_limit + parent.gas_limit() }; // Check for an increase in gas limit beyond the allowed threshold. - - if header.gas_limit > parent_gas_limit { - if header.gas_limit - parent_gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR { + if header.gas_limit() > parent_gas_limit { + if header.gas_limit() - parent_gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR { return Err(ConsensusError::GasLimitInvalidIncrease { parent_gas_limit, - child_gas_limit: header.gas_limit, + child_gas_limit: header.gas_limit(), }) } } // Check for a decrease in gas limit beyond the allowed threshold. - else if parent_gas_limit - header.gas_limit >= parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR + else if parent_gas_limit - header.gas_limit() >= + parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR { return Err(ConsensusError::GasLimitInvalidDecrease { parent_gas_limit, - child_gas_limit: header.gas_limit, + child_gas_limit: header.gas_limit(), }) } // Check if the self gas limit is below the minimum required limit. 
- else if header.gas_limit < MINIMUM_GAS_LIMIT { - return Err(ConsensusError::GasLimitInvalidMinimum { child_gas_limit: header.gas_limit }) + else if header.gas_limit() < MINIMUM_GAS_LIMIT { + return Err(ConsensusError::GasLimitInvalidMinimum { + child_gas_limit: header.gas_limit(), + }) } Ok(()) @@ -97,72 +99,75 @@ impl EthBeaconConsensus impl FullConsensus for EthBeaconConsensus where ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug, - N: NodePrimitives< - BlockHeader = Header, - BlockBody = BlockBody, - Block = Block, - Receipt = Receipt, - >, + N: NodePrimitives, { fn validate_block_post_execution( &self, - block: &BlockWithSenders, + block: &BlockWithSenders, input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) } } -impl Consensus +impl Consensus for EthBeaconConsensus +where + H: BlockHeader, + B: BlockBody, { fn validate_body_against_header( &self, - body: &BlockBody, - header: &SealedHeader, + body: &B, + header: &SealedHeader, ) -> Result<(), ConsensusError> { validate_body_against_header(body, header.header()) } - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_block_pre_execution( + &self, + block: &SealedBlock, + ) -> Result<(), ConsensusError> { validate_block_pre_execution(block, &self.chain_spec) } } -impl HeaderValidator +impl HeaderValidator for EthBeaconConsensus +where + H: BlockHeader, { - fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header.header())?; validate_header_base_fee(header.header(), &self.chain_spec)?; // EIP-4895: Beacon chain push withdrawals as operations - if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && - header.withdrawals_root.is_none() + if 
self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp()) && + header.withdrawals_root().is_none() { return Err(ConsensusError::WithdrawalsRootMissing) - } else if !self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && - header.withdrawals_root.is_some() + } else if !self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp()) && + header.withdrawals_root().is_some() { return Err(ConsensusError::WithdrawalsRootUnexpected) } // Ensures that EIP-4844 fields are valid once cancun is active. - if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp()) { validate_4844_header_standalone(header.header())?; - } else if header.blob_gas_used.is_some() { + } else if header.blob_gas_used().is_some() { return Err(ConsensusError::BlobGasUsedUnexpected) - } else if header.excess_blob_gas.is_some() { + } else if header.excess_blob_gas().is_some() { return Err(ConsensusError::ExcessBlobGasUnexpected) - } else if header.parent_beacon_block_root.is_some() { + } else if header.parent_beacon_block_root().is_some() { return Err(ConsensusError::ParentBeaconBlockRootUnexpected) } - if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) { - if header.requests_hash.is_none() { + if self.chain_spec.is_prague_active_at_timestamp(header.timestamp()) { + if header.requests_hash().is_none() { return Err(ConsensusError::RequestsHashMissing) } - } else if header.requests_hash.is_some() { + } else if header.requests_hash().is_some() { return Err(ConsensusError::RequestsHashUnexpected) } @@ -171,8 +176,8 @@ impl HeaderVa fn validate_header_against_parent( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError> { validate_against_parent_hash_number(header.header(), parent)?; @@ -189,7 +194,7 @@ impl HeaderVa )?; // ensure that the blob gas fields for this block - if 
self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp()) { validate_against_parent_4844(header.header(), parent.header())?; } @@ -198,24 +203,26 @@ impl HeaderVa fn validate_header_with_total_difficulty( &self, - header: &Header, + header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError> { let is_post_merge = self .chain_spec .fork(EthereumHardfork::Paris) - .active_at_ttd(total_difficulty, header.difficulty); + .active_at_ttd(total_difficulty, header.difficulty()); if is_post_merge { - if !header.is_zero_difficulty() { + // TODO: add `is_zero_difficulty` to `alloy_consensus::BlockHeader` trait + if !header.difficulty().is_zero() { return Err(ConsensusError::TheMergeDifficultyIsNotZero) } - if !header.nonce.is_zero() { + // TODO: helper fn in `alloy_consensus::BlockHeader` trait + if !header.nonce().is_some_and(|nonce| nonce.is_zero()) { return Err(ConsensusError::TheMergeNonceIsNotZero) } - if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { + if header.ommers_hash() != EMPTY_OMMER_ROOT_HASH { return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) } @@ -241,9 +248,10 @@ impl HeaderVa let present_timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); - if header.exceeds_allowed_future_timestamp(present_timestamp) { + // TODO: move this to `alloy_consensus::BlockHeader` + if header.timestamp() > present_timestamp + ALLOWED_FUTURE_BLOCK_TIME_SECONDS { return Err(ConsensusError::TimestampIsInFuture { - timestamp: header.timestamp, + timestamp: header.timestamp(), present_timestamp, }) } @@ -263,7 +271,7 @@ mod tests { use reth_primitives::proofs; fn header_with_gas_limit(gas_limit: u64) -> SealedHeader { - let header = Header { gas_limit, ..Default::default() }; + let header = reth_primitives::Header { gas_limit, ..Default::default() }; SealedHeader::new(header, B256::ZERO) } @@ -343,7 +351,7 @@ mod tests { // that the header is valid let 
chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let header = Header { + let header = reth_primitives::Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), ..Default::default() diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index c339c8d25c6f..b9b38b6d51c2 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,26 +1,31 @@ -use alloy_consensus::{proofs::calculate_receipt_root, TxReceipt}; +use alloy_consensus::{proofs::calculate_receipt_root, BlockHeader, TxReceipt}; use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt}; +use reth_primitives_traits::Block; /// Validate a block with regard to execution results: /// /// - Compares the receipts root in the block header to the block body /// - Compares the gas used in the block header to the actual gas usage after execution -pub fn validate_block_post_execution( - block: &BlockWithSenders, +pub fn validate_block_post_execution( + block: &BlockWithSenders, chain_spec: &ChainSpec, receipts: &[Receipt], requests: &Requests, -) -> Result<(), ConsensusError> { +) -> Result<(), ConsensusError> +where + B: Block, + ChainSpec: EthereumHardforks, +{ // Check if gas used matches the value set in header. 
let cumulative_gas_used = receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); - if block.gas_used != cumulative_gas_used { + if block.header().gas_used() != cumulative_gas_used { return Err(ConsensusError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas: GotExpected { got: cumulative_gas_used, expected: block.header().gas_used() }, gas_spent_by_tx: gas_spent_by_transactions(receipts), }) } @@ -29,9 +34,9 @@ pub fn validate_block_post_execution( // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if chain_spec.is_byzantium_active_at_block(block.header.number) { + if chain_spec.is_byzantium_active_at_block(block.header().number()) { if let Err(error) = - verify_receipts(block.header.receipts_root, block.header.logs_bloom, receipts) + verify_receipts(block.header().receipts_root(), block.header().logs_bloom(), receipts) { tracing::debug!(%error, ?receipts, "receipts verification failed"); return Err(error) @@ -39,8 +44,8 @@ pub fn validate_block_post_execution( } // Validate that the header requests hash matches the calculated requests hash - if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - let Some(header_requests_hash) = block.header.requests_hash else { + if chain_spec.is_prague_active_at_timestamp(block.header().timestamp()) { + let Some(header_requests_hash) = block.header().requests_hash() else { return Err(ConsensusError::RequestsHashMissing) }; let requests_hash = requests.requests_hash(); diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 54707e69b26b..4d87be68f5dc 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -10,7 +10,7 @@ use reth_ethereum_engine_primitives::{ }; use reth_evm::execute::BasicBlockExecutorProvider; use 
reth_evm_ethereum::execute::EthExecutionStrategyFactory; -use reth_network::{NetworkHandle, PeersInfo}; +use reth_network::{EthNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ AddOnsContext, ConfigureEvm, FullNodeComponents, HeaderTy, NodeTypesWithDB, TxTy, }; @@ -318,6 +318,8 @@ where > + Unpin + 'static, { + type Primitives = EthNetworkPrimitives; + async fn build_network( self, ctx: &BuilderContext, diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 54026070ec8b..2f6015a09166 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -464,7 +464,10 @@ impl OrderedBodiesResponse { } } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse +where + H: BlockHeader, +{ /// Returns the block number of the first element /// /// # Panics diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index ed8c425e6114..892eae14cbb1 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -54,7 +54,6 @@ where self.inner.clear(); self.last_requested_block_number.take(); } - /// Add new request to the queue. /// Expects a sorted list of headers. pub(crate) fn push_new_request( @@ -71,6 +70,7 @@ where None => last.number(), }) .or(self.last_requested_block_number); + // Create request and push into the queue. self.inner.push( BodiesRequestFuture::new(client, consensus, self.metrics.clone()).with_headers(request), diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 92f46fa6fdd6..a3ad1f3b9dc2 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -56,8 +56,8 @@ pub(crate) struct BodiesRequestFuture { impl BodiesRequestFuture where - B: BodiesClient + 'static, H: BlockHeader, + B: BodiesClient + 'static, { /// Returns an empty future. 
Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index e17cedef11fc..39c89f4c4e21 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -133,9 +133,12 @@ pub trait NetworkPeersEvents: Send + Sync { /// Provides event subscription for the network. #[auto_impl::auto_impl(&, Arc)] -pub trait NetworkEventListenerProvider: NetworkPeersEvents { +pub trait NetworkEventListenerProvider: NetworkPeersEvents { + /// The primitive types to use in the `PeerRequest` used in the stream. + type Primitives: NetworkPrimitives; + /// Creates a new [`NetworkEvent`] listener channel. - fn event_listener(&self) -> EventStream>; + fn event_listener(&self) -> EventStream>>; /// Returns a new [`DiscoveryEvent`] stream. /// /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 986d490c34f9..6163c8730033 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -36,7 +36,6 @@ pub use events::{ use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; use reth_eth_wire_types::{capability::Capabilities, DisconnectReason, EthVersion, Status}; -use reth_network_p2p::EthBlockClient; use reth_network_peers::NodeRecord; /// The `PeerId` type. @@ -44,7 +43,7 @@ pub type PeerId = alloy_primitives::B512; /// Helper trait that unifies network API needed to launch node. 
pub trait FullNetwork: - BlockDownloaderProvider + BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider @@ -56,7 +55,7 @@ pub trait FullNetwork: } impl FullNetwork for T where - T: BlockDownloaderProvider + T: BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 68c57724f0df..a25ad0490818 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -205,8 +205,10 @@ impl NetworkPeersEvents for NetworkHandle { } } -impl NetworkEventListenerProvider> for NetworkHandle { - fn event_listener(&self) -> EventStream>> { +impl NetworkEventListenerProvider for NetworkHandle { + type Primitives = N; + + fn event_listener(&self) -> EventStream>> { self.inner.event_sender.new_listener() } diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index d48fccc6d000..b31954ff1a00 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -6,17 +6,17 @@ use std::{ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use alloy_primitives::B256; use futures::{Future, FutureExt}; -use reth_primitives::BlockBody; +use reth_primitives_traits::BlockBody; /// The bodies future type -pub type BodiesFut = +pub type BodiesFut = Pin>> + Send + Sync>>; /// A client capable of downloading block bodies. #[auto_impl::auto_impl(&, Arc, Box)] pub trait BodiesClient: DownloadClient { /// The body type this client fetches. - type Body: Send + Sync + Unpin + 'static; + type Body: BlockBody; /// The output of the request future for querying block bodies. 
type Output: Future>> + Sync + Send + Unpin; diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index b80a308d8a18..ce7827c8e885 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -15,7 +15,7 @@ pub type BodyDownloaderResult = DownloadResult>>; pub trait BodyDownloader: Send + Sync + Stream> + Unpin { - /// The type of header that can be returned in a blck + /// The type of header that is being used type Header: Debug + Send + Sync + Unpin + 'static; /// The type of the body that is being downloaded. diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index 4be6208c4a2c..606d8f389a84 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -3,6 +3,7 @@ use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection}; +use reth_primitives_traits::BlockHeader; use std::{ fmt::Debug, pin::Pin, @@ -57,7 +58,7 @@ pub type HeadersFut = #[auto_impl::auto_impl(&, Arc, Box)] pub trait HeadersClient: DownloadClient { /// The header type this client fetches. 
- type Header: Send + Sync + Unpin; + type Header: BlockHeader; /// The headers future type type Output: Future>> + Sync + Send + Unpin; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 26d157e1e0cb..1a0b5bad0a13 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -61,7 +61,6 @@ reth-transaction-pool.workspace = true ## ethereum alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } -alloy-consensus.workspace = true revm-primitives.workspace = true ## async diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index e2b18f666c76..e38882fa5d8a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -20,7 +20,7 @@ use reth_db_api::{ use reth_exex::ExExContext; use reth_network::{ transactions::TransactionsManagerConfig, NetworkBuilder, NetworkConfig, NetworkConfigBuilder, - NetworkHandle, NetworkManager, + NetworkHandle, NetworkManager, NetworkPrimitives, }; use reth_node_api::{ FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, @@ -648,19 +648,24 @@ impl BuilderContext { /// /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`] /// connected to that network. 
- pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle + pub fn start_network( + &self, + builder: NetworkBuilder<(), (), N>, + pool: Pool, + ) -> NetworkHandle where + N: NetworkPrimitives, Pool: TransactionPool< Transaction: PoolTransaction< - Consensus = reth_primitives::TransactionSigned, - Pooled = reth_primitives::PooledTransactionsElement, + Consensus = N::BroadcastedTransaction, + Pooled = N::PooledTransaction, >, > + Unpin + 'static, Node::Provider: BlockReader< - Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = N::Block, + Header = N::BlockHeader, >, { self.start_network_with(builder, pool, Default::default()) @@ -672,24 +677,25 @@ impl BuilderContext { /// /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`] /// connected to that network. - pub fn start_network_with( + pub fn start_network_with( &self, - builder: NetworkBuilder<(), ()>, + builder: NetworkBuilder<(), (), N>, pool: Pool, tx_config: TransactionsManagerConfig, - ) -> NetworkHandle + ) -> NetworkHandle where + N: NetworkPrimitives, Pool: TransactionPool< Transaction: PoolTransaction< - Consensus = reth_primitives::TransactionSigned, - Pooled = reth_primitives::PooledTransactionsElement, + Consensus = N::BroadcastedTransaction, + Pooled = N::PooledTransaction, >, > + Unpin + 'static, Node::Provider: BlockReader< - Block = reth_primitives::Block, Receipt = reth_primitives::Receipt, - Header = reth_primitives::Header, + Block = N::Block, + Header = N::BlockHeader, >, { let (handle, network, txpool, eth) = builder diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 7e2d0eb43cc0..ce24c8bff8df 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -9,7 +9,8 @@ use crate::{ }; use reth_consensus::FullConsensus; use 
reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_network::NetworkPrimitives; +use reth_node_api::{BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; @@ -295,13 +296,34 @@ impl NodeComponentsBuilder for ComponentsBuilder where Node: FullNodeTypes, - PoolB: PoolBuilder, - NetworkB: NetworkBuilder, + PoolB: PoolBuilder< + Node, + Pool: TransactionPool< + Transaction: PoolTransaction< + Pooled = ::PooledTransaction, + >, + >, + >, + NetworkB: NetworkBuilder< + Node, + PoolB::Pool, + Primitives: NetworkPrimitives< + BlockHeader = HeaderTy, + BlockBody = BodyTy, + >, + >, PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, ConsB: ConsensusBuilder, { - type Components = Components; + type Components = Components< + Node, + NetworkB::Primitives, + PoolB::Pool, + ExecB::EVM, + ExecB::Executor, + ConsB::Consensus, + >; async fn build_components( self, @@ -369,11 +391,12 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where + N: NetworkPrimitives, BlockBody = BodyTy>, Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, + Fut: Future>> + Send, Pool: TransactionPool>> + Unpin + 'static, @@ -381,7 +404,7 @@ where Executor: BlockExecutorProvider::Primitives>, Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index d62e74bda296..892380a4c6ca 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -20,13 +20,14 @@ pub use execute::*; pub use network::*; pub use 
payload::*; pub use pool::*; +use reth_network_p2p::BlockClient; use crate::{ConfigureEvm, FullNodeTypes}; use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; -use reth_network::NetworkHandle; +use reth_network::{NetworkHandle, NetworkPrimitives}; use reth_network_api::FullNetwork; -use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy}; +use reth_node_api::{BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -49,7 +50,9 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. - type Network: FullNetwork; + type Network: FullNetwork< + Client: BlockClient
, Body = BodyTy>, + >; /// Builds new blocks. type PayloadBuilder: PayloadBuilder::Engine> @@ -78,7 +81,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. @@ -88,14 +91,15 @@ pub struct Components { /// The consensus implementation of the node. pub consensus: Consensus, /// The network implementation of the node. - pub network: NetworkHandle, + pub network: NetworkHandle, /// The handle to the payload builder service. pub payload_builder: PayloadBuilderHandle<::Engine>, } -impl NodeComponents - for Components +impl NodeComponents + for Components where + N: NetworkPrimitives, BlockBody = BodyTy>, Node: FullNodeTypes, Pool: TransactionPool>> + Unpin @@ -108,7 +112,7 @@ where type Evm = EVM; type Executor = Executor; type Consensus = Cons; - type Network = NetworkHandle; + type Network = NetworkHandle; type PayloadBuilder = PayloadBuilderHandle<::Engine>; fn pool(&self) -> &Self::Pool { @@ -136,8 +140,9 @@ where } } -impl Clone for Components +impl Clone for Components where + N: NetworkPrimitives, Node: FullNodeTypes, Pool: TransactionPool, EVM: ConfigureEvm
, Transaction = TxTy>, diff --git a/crates/node/builder/src/components/network.rs b/crates/node/builder/src/components/network.rs index 5f473e408f69..33f128a69788 100644 --- a/crates/node/builder/src/components/network.rs +++ b/crates/node/builder/src/components/network.rs @@ -2,33 +2,39 @@ use std::future::Future; -use reth_network::NetworkHandle; +use reth_network::{NetworkHandle, NetworkPrimitives}; use reth_transaction_pool::TransactionPool; use crate::{BuilderContext, FullNodeTypes}; /// A type that knows how to build the network implementation. pub trait NetworkBuilder: Send { + /// The primitive types to use for the network. + type Primitives: NetworkPrimitives; + /// Launches the network implementation and returns the handle to it. fn build_network( self, ctx: &BuilderContext, pool: Pool, - ) -> impl Future> + Send; + ) -> impl Future>> + Send; } -impl NetworkBuilder for F +impl NetworkBuilder for F where Node: FullNodeTypes, + P: NetworkPrimitives, Pool: TransactionPool, F: Fn(&BuilderContext, Pool) -> Fut + Send, - Fut: Future> + Send, + Fut: Future>> + Send, { + type Primitives = P; + fn build_network( self, ctx: &BuilderContext, pool: Pool, - ) -> impl Future> + Send { + ) -> impl Future>> + Send { self(ctx, pool) } } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 62226cb0b1cb..c5275647aa43 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -29,6 +29,7 @@ use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, + primitives::BlockHeader, version::{ BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES, VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA, @@ -719,7 +720,7 @@ where /// necessary pub async fn max_block(&self, client: C) -> eyre::Result> where - C: HeadersClient
, + C: HeadersClient, { self.node_config().max_block(client, self.provider_factory().clone()).await } diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 6dff28bd39b7..62cfbac9bea8 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,7 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; -use reth_node_api::{BodyTy, HeaderTy, NodePrimitives}; +use reth_node_api::{BodyTy, HeaderTy}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -41,7 +41,6 @@ where N: ProviderNodeTypes, Client: BlockClient
, Body = BodyTy> + 'static, Executor: BlockExecutorProvider, - N::Primitives: NodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) @@ -89,7 +88,6 @@ where H: HeaderDownloader
> + 'static, B: BodyDownloader
, Body = BodyTy> + 'static, Executor: BlockExecutorProvider, - N::Primitives: NodePrimitives, { let mut builder = Pipeline::::builder(); diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index e77b50f1e8f0..43585b3762fb 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -11,7 +11,7 @@ use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGenera use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_db::transaction::{DbTx, DbTxMut}; use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; -use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; +use reth_network::{EthNetworkPrimitives, NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; use reth_node_api::{AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, TxTy}; use reth_node_builder::{ components::{ @@ -656,6 +656,8 @@ where > + Unpin + 'static, { + type Primitives = EthNetworkPrimitives; + async fn build_network( self, ctx: &BuilderContext, diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index df5a4c542bfa..53eb23379646 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -215,7 +215,7 @@ where impl OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader
+ 'static, + H: HeaderDownloader + 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. @@ -236,7 +236,7 @@ where provider: P, tip: watch::Receiver, header_downloader: H, - consensus: Arc, + consensus: Arc>, stages_config: StageConfig, ) -> StageSetBuilder where @@ -258,7 +258,7 @@ where impl StageSet for OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader
+ 'static, + H: HeaderDownloader + 'static, B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 91afc33efaa0..685b0abb9e60 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,5 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::{BlockHeader, Header, Sealable}; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_primitives::BlockNumber; use num_traits::Zero; @@ -194,10 +194,7 @@ where unwind_to: Option, ) -> Result<(), StageError> where - Provider: StaticFileProviderFactory - + DBProvider - + BlockReader - + HeaderProvider
, + Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, { // If thre's any receipts pruning configured, receipts are written directly to database and // inconsistencies are expected. @@ -267,7 +264,7 @@ where impl Stage for ExecutionStage where - E: BlockExecutorProvider>, + E: BlockExecutorProvider, Provider: DBProvider + BlockReader< Block = ::Block, diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index ff4d37cf3f61..8cd7abc7316c 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -136,7 +136,7 @@ where Provider: DBProvider + TrieWriter + StatsReader - + HeaderProvider
+ + HeaderProvider + StageCheckpointReader + StageCheckpointWriter, { @@ -344,18 +344,18 @@ where /// Check that the computed state root matches the root in the expected header. #[inline] -fn validate_state_root( +fn validate_state_root( got: B256, - expected: SealedHeader, + expected: SealedHeader, target_block: BlockNumber, ) -> Result<(), StageError> { - if got == expected.state_root { + if got == expected.state_root() { Ok(()) } else { error!(target: "sync::stages::merkle", ?target_block, ?got, ?expected, "Failed to verify block state root! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); Err(StageError::Block { error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( - GotExpected { got, expected: expected.state_root }.into(), + GotExpected { got, expected: expected.state_root() }.into(), )), block: Box::new(expected.block_with_parent()), }) diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index b5506068f481..2dcce61b90d0 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -59,7 +59,7 @@ impl Default for SenderRecoveryStage { impl Stage for SenderRecoveryStage where Provider: DBProvider - + BlockReader
+ + BlockReader + StaticFileProviderFactory> + StatsReader + PruneCheckpointReader, @@ -146,8 +146,7 @@ fn recover_range( senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where - Provider: - DBProvider + HeaderProvider
+ StaticFileProviderFactory, + Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, CURSOR: DbCursorRW, { debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); From 7b9ca0af733ccd13c2e2964cc9e982d6e4603864 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 11 Dec 2024 01:01:43 +0100 Subject: [PATCH 58/70] perf: enable map-foldhash alloy-primitives feature globally (#13278) --- Cargo.toml | 5 +++-- crates/trie/sparse/Cargo.toml | 1 - 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 285f0fc6e379..20f7ba19f60e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -433,7 +433,9 @@ revm-primitives = { version = "14.0.0", default-features = false } # eth alloy-chains = { version = "0.1.32", default-features = false } alloy-dyn-abi = "0.8.15" -alloy-primitives = { version = "0.8.15", default-features = false } +alloy-primitives = { version = "0.8.15", default-features = false, features = [ + "map-foldhash", +] } alloy-rlp = { version = "0.3.10", default-features = false } alloy-sol-types = "0.8.15" alloy-trie = { version = "0.7", default-features = false } @@ -563,7 +565,6 @@ tracing-futures = "0.2" tower = "0.4" tower-http = "0.6" - # p2p discv5 = "0.8.0" if-addrs = "0.13" diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 09826e410847..205451ef72a8 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -11,7 +11,6 @@ description = "Sparse MPT implementation" [lints] workspace = true - [dependencies] # reth reth-primitives-traits.workspace = true From 0144a433dfb75319ee9ae98da1ad3943df7a893e Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 11 Dec 2024 03:49:15 +0100 Subject: [PATCH 59/70] perf: reduce size of DatabaseError (#13283) --- crates/errors/src/error.rs | 2 +- crates/storage/errors/src/db.rs | 4 ++-- 
crates/trie/parallel/src/proof.rs | 6 +----- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/crates/errors/src/error.rs b/crates/errors/src/error.rs index 869d5732746d..2d97572f529a 100644 --- a/crates/errors/src/error.rs +++ b/crates/errors/src/error.rs @@ -81,7 +81,7 @@ mod size_asserts { static_assert_size!(RethError, 64); static_assert_size!(BlockExecutionError, 56); static_assert_size!(ConsensusError, 48); - static_assert_size!(DatabaseError, 40); + static_assert_size!(DatabaseError, 32); static_assert_size!(ProviderError, 48); static_assert_size!(CanonicalError, 56); } diff --git a/crates/storage/errors/src/db.rs b/crates/storage/errors/src/db.rs index 22efbb1fb4f8..a9000a952b7f 100644 --- a/crates/storage/errors/src/db.rs +++ b/crates/storage/errors/src/db.rs @@ -64,7 +64,7 @@ impl core::error::Error for DatabaseError { #[display("{message} ({code})")] pub struct DatabaseErrorInfo { /// Human-readable error message. - pub message: String, + pub message: Box, /// Error code. 
pub code: i32, } @@ -75,7 +75,7 @@ where { #[inline] fn from(error: E) -> Self { - Self { message: error.to_string(), code: error.into() } + Self { message: error.to_string().into(), code: error.into() } } } diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 148f7cd5d4d7..0eec133d71ea 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -136,11 +136,7 @@ where .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().cloned())) .with_branch_node_hash_masks(self.collect_branch_node_hash_masks) .storage_multiproof(target_slots) - .map_err(|e| { - ParallelStateRootError::StorageRoot(StorageRootError::Database( - DatabaseError::Other(e.to_string()), - )) - }) + .map_err(|e| ParallelStateRootError::Other(e.to_string())) })(); if let Err(err) = tx.send(result) { error!(target: "trie::parallel", ?hashed_address, err_content = ?err.0, "Failed to send proof result"); From 0494ca01d58a1474397034c01522273a6f9b238e Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Wed, 11 Dec 2024 05:52:42 +0100 Subject: [PATCH 60/70] perf: trie micro optimizations (#13282) --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/chain-state/src/in_memory.rs | 9 +- crates/chain-state/src/memory_overlay.rs | 10 +-- crates/engine/tree/src/tree/root.rs | 18 ++-- crates/evm/execution-errors/src/trie.rs | 67 ++++++++++++++- crates/revm/src/test_utils.rs | 8 +- crates/rpc/rpc-builder/src/config.rs | 2 +- crates/rpc/rpc-eth-types/src/cache/db.rs | 11 +-- .../src/providers/bundle_state_provider.rs | 11 +-- .../src/providers/database/provider.rs | 10 +-- .../src/providers/state/historical.rs | 22 +++-- .../provider/src/providers/state/latest.rs | 22 +++-- .../provider/src/providers/state/macros.rs | 4 +- .../storage/provider/src/test_utils/blocks.rs | 2 +- .../storage/provider/src/test_utils/mock.rs | 8 +- crates/storage/provider/src/test_utils/mod.rs | 2 +- 
.../storage/provider/src/test_utils/noop.rs | 9 +- crates/storage/storage-api/src/noop.rs | 9 +- crates/storage/storage-api/src/trie.rs | 15 ++-- crates/trie/common/src/account.rs | 11 +-- crates/trie/common/src/prefix_set.rs | 17 ++-- crates/trie/common/src/proofs.rs | 7 +- crates/trie/common/src/updates.rs | 19 ++--- crates/trie/db/src/proof.rs | 6 +- crates/trie/db/src/state.rs | 7 +- crates/trie/db/src/witness.rs | 6 +- crates/trie/parallel/src/proof.rs | 24 +++--- .../trie/parallel/src/storage_root_targets.rs | 5 +- crates/trie/sparse/benches/root.rs | 4 +- crates/trie/sparse/src/lib.rs | 3 +- crates/trie/sparse/src/state.rs | 52 ++++++------ crates/trie/sparse/src/trie.rs | 85 ++++++++++--------- .../trie/trie/src/hashed_cursor/post_state.rs | 6 +- crates/trie/trie/src/proof/blinded.rs | 6 +- crates/trie/trie/src/proof/mod.rs | 8 +- crates/trie/trie/src/state.rs | 18 ++-- crates/trie/trie/src/witness.rs | 27 +++--- 37 files changed, 306 insertions(+), 246 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index e8576de4a711..6d1d8930defc 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1244,7 +1244,7 @@ where )) .with_prefix_sets(prefix_sets) .root_with_updates() - .map_err(Into::::into)?; + .map_err(BlockValidationError::from)?; let tip = blocks.tip(); if state_root != tip.state_root { return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch { diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 670c340db4bf..536f6baf96e8 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -944,7 +944,7 @@ mod tests { use super::*; use crate::test_utils::TestBlockBuilder; use alloy_eips::eip7685::Requests; - use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; + use alloy_primitives::{map::B256HashMap, BlockNumber, Bytes, 
StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; @@ -953,7 +953,8 @@ mod tests { StateRootProvider, StorageRootProvider, }; use reth_trie::{ - AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, + AccountProof, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, + StorageProof, TrieInput, }; fn create_mock_state( @@ -1094,7 +1095,7 @@ mod tests { fn multiproof( &self, _input: TrieInput, - _targets: HashMap>, + _targets: MultiProofTargets, ) -> ProviderResult { Ok(MultiProof::default()) } @@ -1103,7 +1104,7 @@ mod tests { &self, _input: TrieInput, _target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(HashMap::default()) } } diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 21bc30b07cf7..da4c2c9fea7d 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,9 +1,7 @@ use super::ExecutedBlock; use alloy_consensus::BlockHeader; use alloy_primitives::{ - keccak256, - map::{HashMap, HashSet}, - Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, + keccak256, map::B256HashMap, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, NodePrimitives}; @@ -13,7 +11,7 @@ use reth_storage_api::{ }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - StorageMultiProof, TrieInput, + MultiProofTargets, StorageMultiProof, TrieInput, }; use revm::db::BundleState; use std::sync::OnceLock; @@ -201,7 +199,7 @@ macro_rules! 
impl_state_provider { fn multiproof( &self, mut input: TrieInput, - targets: HashMap>, + targets: MultiProofTargets, ) -> ProviderResult { let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); input.prepend_cached(nodes, state); @@ -212,7 +210,7 @@ macro_rules! impl_state_provider { &self, mut input: TrieInput, target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); input.prepend_cached(nodes, state); self.historical.witness(input, target) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index a8c455265dd5..72b18d49f52c 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -8,13 +8,13 @@ use reth_provider::{ StateCommitmentProvider, }; use reth_trie::{ - proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, - TrieInput, + proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, Nibbles, TrieInput, }; use reth_trie_db::DatabaseProof; use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ - errors::{SparseStateTrieResult, SparseTrieError}, + errors::{SparseStateTrieResult, SparseTrieErrorKind}, SparseStateTrie, }; use revm_primitives::{keccak256, EvmState, B256}; @@ -232,7 +232,7 @@ pub struct StateRootTask { /// Sender for state root related messages. tx: Sender, /// Proof targets that have been already fetched. - fetched_proof_targets: HashMap>, + fetched_proof_targets: MultiProofTargets, /// Proof sequencing handler. proof_sequencer: ProofSequencer, /// The sparse trie used for the state root calculation. 
If [`None`], then update is in @@ -297,7 +297,7 @@ where view: ConsistentDbView, input: Arc, update: EvmState, - fetched_proof_targets: &mut HashMap>, + fetched_proof_targets: &mut MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: Sender, ) { @@ -525,8 +525,8 @@ where /// account shouldn't be included. fn get_proof_targets( state_update: &HashedPostState, - fetched_proof_targets: &HashMap>, -) -> HashMap> { + fetched_proof_targets: &MultiProofTargets, +) -> MultiProofTargets { let mut targets = HashMap::default(); // first collect all new accounts (not previously fetched) @@ -558,7 +558,7 @@ fn get_proof_targets( fn update_sparse_trie( mut trie: Box, multiproof: MultiProof, - targets: HashMap>, + targets: MultiProofTargets, state: HashedPostState, ) -> SparseStateTrieResult<(Box, Duration)> { trace!(target: "engine::root::sparse", "Updating sparse trie"); @@ -576,7 +576,7 @@ fn update_sparse_trie( .par_bridge() .map(|(address, storage, storage_trie)| { trace!(target: "engine::root::sparse", ?address, "Updating storage"); - let mut storage_trie = storage_trie.ok_or(SparseTrieError::Blind)?; + let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?; if storage.wiped { trace!(target: "engine::root::sparse", ?address, "Wiping storage"); diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index ba1bfcc02370..1242eaa4b47a 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -67,7 +67,38 @@ pub type SparseStateTrieResult = Result; /// Error encountered in `SparseStateTrie`. 
#[derive(Error, Debug)] -pub enum SparseStateTrieError { +#[error(transparent)] +pub struct SparseStateTrieError(#[from] Box); + +impl> From for SparseStateTrieError { + #[cold] + fn from(value: T) -> Self { + Self(Box::new(value.into())) + } +} + +impl From for SparseStateTrieErrorKind { + #[cold] + fn from(value: SparseTrieError) -> Self { + Self::Sparse(*value.0) + } +} + +impl SparseStateTrieError { + /// Returns the error kind. + pub const fn kind(&self) -> &SparseStateTrieErrorKind { + &self.0 + } + + /// Consumes the error and returns the error kind. + pub fn into_kind(self) -> SparseStateTrieErrorKind { + *self.0 + } +} + +/// Error encountered in `SparseStateTrie`. +#[derive(Error, Debug)] +pub enum SparseStateTrieErrorKind { /// Encountered invalid root node. #[error("invalid root node at {path:?}: {node:?}")] InvalidRootNode { @@ -78,7 +109,7 @@ pub enum SparseStateTrieError { }, /// Sparse trie error. #[error(transparent)] - Sparse(#[from] SparseTrieError), + Sparse(#[from] SparseTrieErrorKind), /// RLP error. #[error(transparent)] Rlp(#[from] alloy_rlp::Error), @@ -89,7 +120,31 @@ pub type SparseTrieResult = Result; /// Error encountered in `SparseTrie`. #[derive(Error, Debug)] -pub enum SparseTrieError { +#[error(transparent)] +pub struct SparseTrieError(#[from] Box); + +impl> From for SparseTrieError { + #[cold] + fn from(value: T) -> Self { + Self(Box::new(value.into())) + } +} + +impl SparseTrieError { + /// Returns the error kind. + pub const fn kind(&self) -> &SparseTrieErrorKind { + &self.0 + } + + /// Consumes the error and returns the error kind. + pub fn into_kind(self) -> SparseTrieErrorKind { + *self.0 + } +} + +/// [`SparseTrieError`] kind. +#[derive(Error, Debug)] +pub enum SparseTrieErrorKind { /// Sparse trie is still blind. Thrown on attempt to update it. 
#[error("sparse trie is blind")] Blind, @@ -134,6 +189,12 @@ pub enum TrieWitnessError { MissingAccount(B256), } +impl From for TrieWitnessError { + fn from(error: SparseStateTrieErrorKind) -> Self { + Self::Sparse(error.into()) + } +} + impl From for ProviderError { fn from(error: TrieWitnessError) -> Self { Self::TrieWitnessError(error.to_string()) diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 9460d3e1c784..7779d1ca8b07 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,7 +1,7 @@ use alloc::vec::Vec; use alloy_primitives::{ keccak256, - map::{HashMap, HashSet}, + map::{B256HashMap, HashMap}, Address, BlockNumber, Bytes, StorageKey, B256, U256, }; use reth_primitives::{Account, Bytecode}; @@ -12,7 +12,7 @@ use reth_storage_api::{ use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, - MultiProof, StorageMultiProof, StorageProof, TrieInput, + MultiProof, MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; /// Mock state for testing @@ -136,7 +136,7 @@ impl StateProofProvider for StateProviderTest { fn multiproof( &self, _input: TrieInput, - _targets: HashMap>, + _targets: MultiProofTargets, ) -> ProviderResult { unimplemented!("proof generation is not supported") } @@ -145,7 +145,7 @@ impl StateProofProvider for StateProviderTest { &self, _input: TrieInput, _target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { unimplemented!("witness generation is not supported") } } diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 967f5840c011..5ca03f770316 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -253,7 +253,7 @@ mod tests { fn test_rpc_gas_cap() { let args = CommandParser::::parse_from(["reth"]).args; let config = args.eth_config(); - assert_eq!(config.rpc_gas_cap, 
Into::::into(RPC_DEFAULT_GAS_CAP)); + assert_eq!(config.rpc_gas_cap, u64::from(RPC_DEFAULT_GAS_CAP)); let args = CommandParser::::parse_from(["reth", "--rpc.gascap", "1000"]).args; diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index ed107f3b0a9e..bea496166580 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -2,14 +2,11 @@ //! in default implementation of //! `reth_rpc_eth_api::helpers::Call`. -use alloy_primitives::{ - map::{HashMap, HashSet}, - Address, B256, U256, -}; +use alloy_primitives::{Address, B256, U256}; use reth_errors::ProviderResult; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_storage_api::{HashedPostStateProvider, StateProvider}; -use reth_trie::HashedStorage; +use reth_trie::{HashedStorage, MultiProofTargets}; use revm::Database; /// Helper alias type for the state's [`CacheDB`] @@ -91,7 +88,7 @@ impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { fn multiproof( &self, input: reth_trie::TrieInput, - targets: HashMap>, + targets: MultiProofTargets, ) -> ProviderResult { self.0.multiproof(input, targets) } @@ -100,7 +97,7 @@ impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { &self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState, - ) -> reth_errors::ProviderResult> + ) -> reth_errors::ProviderResult> { self.0.witness(input, target) } diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 619296b57f38..16cd64ca2293 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,16 +1,13 @@ use crate::{ AccountReader, BlockHashReader, ExecutionDataProvider, StateProvider, StateRootProvider, }; -use alloy_primitives::{ - map::{HashMap, HashSet}, - Address, 
BlockNumber, Bytes, B256, -}; +use alloy_primitives::{map::B256HashMap, Address, BlockNumber, Bytes, B256}; use reth_primitives::{Account, Bytecode}; use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - StorageMultiProof, TrieInput, + MultiProofTargets, StorageMultiProof, TrieInput, }; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] @@ -169,7 +166,7 @@ impl StateProofProvider fn multiproof( &self, mut input: reth_trie::TrieInput, - targets: HashMap>, + targets: MultiProofTargets, ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); input.prepend(self.hashed_post_state(bundle_state)); @@ -180,7 +177,7 @@ impl StateProofProvider &self, mut input: TrieInput, target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.witness(input, target) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index da8fb97cedc6..eaaecb7568c1 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -27,7 +27,7 @@ use alloy_eips::{ }; use alloy_primitives::{ keccak256, - map::{hash_map, HashMap, HashSet}, + map::{hash_map, B256HashMap, HashMap, HashSet}, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, }; use itertools::Itertools; @@ -296,7 +296,7 @@ impl DatabaseProvider::default(); + let mut storage_prefix_sets = B256HashMap::::default(); let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; for (hashed_address, hashed_slots) in 
storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); @@ -321,7 +321,7 @@ impl DatabaseProvider::into)?; + .map_err(reth_db::DatabaseError::from)?; let parent_number = range.start().saturating_sub(1); let parent_state_root = self @@ -2310,7 +2310,7 @@ impl StorageTrieWriter for DatabaseP /// updates by the hashed address, writing in sorted order. fn write_storage_trie_updates( &self, - storage_tries: &HashMap, + storage_tries: &B256HashMap, ) -> ProviderResult { let mut num_entries = 0; let mut storage_tries = Vec::from_iter(storage_tries); @@ -2549,7 +2549,7 @@ impl HashingWriter for DatabaseProvi let (state_root, trie_updates) = StateRoot::from_tx(&self.tx) .with_prefix_sets(prefix_sets) .root_with_updates() - .map_err(Into::::into)?; + .map_err(reth_db::DatabaseError::from)?; if state_root != expected_state_root { return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch { root: GotExpected { got: state_root, expected: expected_state_root }, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 93752c1e278d..be5c3b5041e9 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -4,8 +4,7 @@ use crate::{ }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ - map::{HashMap, HashSet}, - Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, + map::B256HashMap, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_db::{tables, BlockNumberList}; use reth_db_api::{ @@ -23,8 +22,8 @@ use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, - StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StateRoot, + StorageMultiProof, StorageRoot, TrieInput, }; use reth_trie_db::{ 
DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, @@ -346,7 +345,7 @@ impl StorageRoo let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); StorageProof::overlay_storage_proof(self.tx(), address, slot, revert_storage) - .map_err(Into::::into) + .map_err(ProviderError::from) } fn storage_multiproof( @@ -358,7 +357,7 @@ impl StorageRoo let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); StorageProof::overlay_storage_multiproof(self.tx(), address, slots, revert_storage) - .map_err(Into::::into) + .map_err(ProviderError::from) } } @@ -373,26 +372,25 @@ impl StateProof slots: &[B256], ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx(), input, address, slots) - .map_err(Into::::into) + Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) } fn multiproof( &self, mut input: TrieInput, - targets: HashMap>, + targets: MultiProofTargets, ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) } fn witness( &self, mut input: TrieInput, target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { input.prepend(self.revert_state()?); - TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(ProviderError::from) } } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index bdb6de1e569e..abbab7259060 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -3,8 +3,7 @@ use crate::{ HashedPostStateProvider, StateProvider, StateRootProvider, }; use alloy_primitives::{ - 
map::{HashMap, HashSet}, - Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, + map::B256HashMap, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_db::tables; use reth_db_api::{cursor::DbDupCursorRO, transaction::DbTx}; @@ -17,8 +16,8 @@ use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, - StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StateRoot, + StorageMultiProof, StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, @@ -113,7 +112,7 @@ impl StorageRootProvider hashed_storage: HashedStorage, ) -> ProviderResult { StorageProof::overlay_storage_proof(self.tx(), address, slot, hashed_storage) - .map_err(Into::::into) + .map_err(ProviderError::from) } fn storage_multiproof( @@ -123,7 +122,7 @@ impl StorageRootProvider hashed_storage: HashedStorage, ) -> ProviderResult { StorageProof::overlay_storage_multiproof(self.tx(), address, slots, hashed_storage) - .map_err(Into::::into) + .map_err(ProviderError::from) } } @@ -136,24 +135,23 @@ impl StateProofProvider address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx(), input, address, slots) - .map_err(Into::::into) + Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) } fn multiproof( &self, input: TrieInput, - targets: HashMap>, + targets: MultiProofTargets, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) } fn witness( &self, input: TrieInput, target: HashedPostState, - ) -> ProviderResult> { - TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) + ) -> ProviderResult> { + 
TrieWitness::overlay_witness(self.tx(), input, target).map_err(ProviderError::from) } } diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 1fa15214e9a9..da7507df8a1d 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -54,8 +54,8 @@ macro_rules! delegate_provider_impls { } StateProofProvider $(where [$($generics)*])? { fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; - fn multiproof(&self, input: reth_trie::TrieInput, targets: alloy_primitives::map::HashMap>) -> reth_storage_errors::provider::ProviderResult; - fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; + fn multiproof(&self, input: reth_trie::TrieInput, targets: reth_trie::MultiProofTargets) -> reth_storage_errors::provider::ProviderResult; + fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; } HashedPostStateProvider $(where [$($generics)*])? 
{ fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> reth_trie::HashedPostState; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index b5c0ba7a1200..7f357aa186f7 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -171,7 +171,7 @@ fn bundle_state_root(execution_outcome: &ExecutionOutcome) -> B256 { ( address, ( - Into::::into(info), + Account::from(info), storage_root_unhashed( account .storage diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 2aa70a47b239..76a584e6d886 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -12,7 +12,7 @@ use alloy_eips::{ }; use alloy_primitives::{ keccak256, - map::{HashMap, HashSet}, + map::{B256HashMap, HashMap}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; @@ -35,7 +35,7 @@ use reth_storage_api::{ use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, - StorageMultiProof, StorageProof, TrieInput, + MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -673,7 +673,7 @@ impl StateProofProvider for MockEthProvider { fn multiproof( &self, _input: TrieInput, - _targets: HashMap>, + _targets: MultiProofTargets, ) -> ProviderResult { Ok(MultiProof::default()) } @@ -682,7 +682,7 @@ impl StateProofProvider for MockEthProvider { &self, _input: TrieInput, _target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(HashMap::default()) } } diff --git a/crates/storage/provider/src/test_utils/mod.rs 
b/crates/storage/provider/src/test_utils/mod.rs index 2c3795573c20..b6788914ee83 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -80,7 +80,7 @@ pub fn insert_genesis>( let (root, updates) = StateRoot::from_tx(provider.tx_ref()) .root_with_updates() - .map_err(Into::::into)?; + .map_err(reth_db::DatabaseError::from)?; provider.write_trie_updates(&updates).unwrap(); provider.commit()?; diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index b72df25af289..065730c9caa5 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -10,7 +10,7 @@ use alloy_eips::{ BlockHashOrNumber, BlockId, BlockNumberOrTag, }; use alloy_primitives::{ - map::{HashMap, HashSet}, + map::{B256HashMap, HashMap}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use reth_chain_state::{ @@ -32,7 +32,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, TrieInput, }; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use tokio::sync::{broadcast, watch}; @@ -401,7 +402,7 @@ impl StateProofProvider for NoopProvider { fn multiproof( &self, _input: TrieInput, - _targets: HashMap>, + _targets: MultiProofTargets, ) -> ProviderResult { Ok(MultiProof::default()) } @@ -410,7 +411,7 @@ impl StateProofProvider for NoopProvider { &self, _input: TrieInput, _target: HashedPostState, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(HashMap::default()) } } diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 858c8e4c8329..27cfc8253520 100644 --- a/crates/storage/storage-api/src/noop.rs +++ 
b/crates/storage/storage-api/src/noop.rs @@ -12,7 +12,7 @@ use alloy_eips::{ BlockHashOrNumber, BlockId, BlockNumberOrTag, }; use alloy_primitives::{ - map::{HashMap, HashSet}, + map::{B256HashMap, HashMap}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; @@ -25,7 +25,8 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, TrieInput, }; use std::{ marker::PhantomData, @@ -442,7 +443,7 @@ impl StateProofProvider for NoopProvider>, + _targets: MultiProofTargets, ) -> ProviderResult { Ok(MultiProof::default()) } @@ -451,7 +452,7 @@ impl StateProofProvider for NoopProvider ProviderResult> { + ) -> ProviderResult> { Ok(HashMap::default()) } } diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index ee1ca1de1800..fefa7e4508d5 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,12 +1,9 @@ -use alloy_primitives::{ - map::{HashMap, HashSet}, - Address, Bytes, B256, -}; +use alloy_primitives::{map::B256HashMap, Address, Bytes, B256}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::{StorageTrieUpdates, TrieUpdates}, - AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, StorageProof, - TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, + StorageProof, TrieInput, }; /// A type that can compute the state root of a given post state. 
@@ -84,7 +81,7 @@ pub trait StateProofProvider: Send + Sync { fn multiproof( &self, input: TrieInput, - targets: HashMap>, + targets: MultiProofTargets, ) -> ProviderResult; /// Get trie witness for provided state. @@ -92,7 +89,7 @@ pub trait StateProofProvider: Send + Sync { &self, input: TrieInput, target: HashedPostState, - ) -> ProviderResult>; + ) -> ProviderResult>; } /// Trie Writer @@ -114,7 +111,7 @@ pub trait StorageTrieWriter: Send + Sync { /// Returns the number of entries modified. fn write_storage_trie_updates( &self, - storage_tries: &HashMap, + storage_tries: &B256HashMap, ) -> ProviderResult; /// Writes storage trie updates for the given hashed address. diff --git a/crates/trie/common/src/account.rs b/crates/trie/common/src/account.rs index 0808837063cf..60dc44d4ee18 100644 --- a/crates/trie/common/src/account.rs +++ b/crates/trie/common/src/account.rs @@ -92,13 +92,10 @@ mod tests { assert_eq!(trie_account.code_hash, KECCAK_EMPTY); // Check that the default Account converts to the same TrieAccount - assert_eq!(Into::::into((Account::default(), EMPTY_ROOT_HASH)), trie_account); + assert_eq!(TrieAccount::from((Account::default(), EMPTY_ROOT_HASH)), trie_account); // Check that the default AccountInfo converts to the same TrieAccount - assert_eq!( - Into::::into((AccountInfo::default(), EMPTY_ROOT_HASH)), - trie_account - ); + assert_eq!(TrieAccount::from((AccountInfo::default(), EMPTY_ROOT_HASH)), trie_account); } #[test] @@ -131,7 +128,7 @@ mod tests { // Check that the Account converts to the same TrieAccount assert_eq!( - Into::::into(( + TrieAccount::from(( Account { nonce: 10, balance: U256::from(1000), @@ -144,7 +141,7 @@ mod tests { // Check that the AccountInfo converts to the same TrieAccount assert_eq!( - Into::::into(( + TrieAccount::from(( AccountInfo { nonce: 10, balance: U256::from(1000), diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 2536a41ff0c0..d58531f12daf 100644 --- 
a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -1,8 +1,5 @@ use crate::Nibbles; -use alloy_primitives::{ - map::{HashMap, HashSet}, - B256, -}; +use alloy_primitives::map::{B256HashMap, B256HashSet}; use std::sync::Arc; /// Collection of mutable prefix sets. @@ -12,9 +9,9 @@ pub struct TriePrefixSetsMut { pub account_prefix_set: PrefixSetMut, /// A map containing storage changes with the hashed address as key and a set of storage key /// prefixes as the value. - pub storage_prefix_sets: HashMap, + pub storage_prefix_sets: B256HashMap, /// A set of hashed addresses of destroyed accounts. - pub destroyed_accounts: HashSet, + pub destroyed_accounts: B256HashSet, } impl TriePrefixSetsMut { @@ -50,9 +47,9 @@ pub struct TriePrefixSets { pub account_prefix_set: PrefixSet, /// A map containing storage changes with the hashed address as key and a set of storage key /// prefixes as the value. - pub storage_prefix_sets: HashMap, + pub storage_prefix_sets: B256HashMap, /// A set of hashed addresses of destroyed accounts. - pub destroyed_accounts: HashSet, + pub destroyed_accounts: B256HashSet, } /// A container for efficiently storing and checking for the presence of key prefixes. @@ -146,9 +143,9 @@ impl PrefixSetMut { if self.all { PrefixSet { index: 0, all: true, keys: Arc::new(Vec::new()) } } else { - self.keys.sort(); + self.keys.sort_unstable(); self.keys.dedup(); - // we need to shrink in both the sorted and non-sorted cases because deduping may have + // We need to shrink in both the sorted and non-sorted cases because deduping may have // occurred either on `freeze`, or during `contains`. 
self.keys.shrink_to_fit(); PrefixSet { index: 0, all: false, keys: Arc::new(self.keys) } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 99b315d2467b..eb3626d90ea2 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -4,7 +4,7 @@ use crate::{Nibbles, TrieAccount}; use alloy_consensus::constants::KECCAK_EMPTY; use alloy_primitives::{ keccak256, - map::{hash_map, HashMap}, + map::{hash_map, B256HashMap, B256HashSet, HashMap}, Address, Bytes, B256, U256, }; use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; @@ -16,6 +16,9 @@ use alloy_trie::{ use itertools::Itertools; use reth_primitives_traits::Account; +/// Proof targets map. +pub type MultiProofTargets = B256HashMap; + /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes /// in the paths of target accounts. @@ -26,7 +29,7 @@ pub struct MultiProof { /// The hash masks of the branch nodes in the account proof. pub branch_node_hash_masks: HashMap, /// Storage trie multiproofs. - pub storages: HashMap, + pub storages: B256HashMap, } impl MultiProof { diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index 6f80eb16553e..1f50462507b5 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -1,6 +1,6 @@ use crate::{BranchNodeCompact, HashBuilder, Nibbles}; use alloy_primitives::{ - map::{HashMap, HashSet}, + map::{B256HashMap, B256HashSet, HashMap, HashSet}, B256, }; @@ -15,7 +15,7 @@ pub struct TrieUpdates { #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))] pub removed_nodes: HashSet, /// Collection of updated storage tries indexed by the hashed address. - pub storage_tries: HashMap, + pub storage_tries: B256HashMap, } impl TrieUpdates { @@ -37,7 +37,7 @@ impl TrieUpdates { } /// Returns a reference to updated storage tries. 
- pub const fn storage_tries_ref(&self) -> &HashMap { + pub const fn storage_tries_ref(&self) -> &B256HashMap { &self.storage_tries } @@ -84,7 +84,7 @@ impl TrieUpdates { &mut self, hash_builder: HashBuilder, removed_keys: HashSet, - destroyed_accounts: HashSet, + destroyed_accounts: B256HashSet, ) { // Retrieve updated nodes from hash builder. let (_, updated_nodes) = hash_builder.split(); @@ -347,7 +347,7 @@ pub struct TrieUpdatesSorted { pub removed_nodes: HashSet, /// Storage tries storage stored by hashed address of the account /// the trie belongs to. - pub storage_tries: HashMap, + pub storage_tries: B256HashMap, } impl TrieUpdatesSorted { @@ -362,7 +362,7 @@ impl TrieUpdatesSorted { } /// Returns reference to updated storage tries. - pub const fn storage_tries_ref(&self) -> &HashMap { + pub const fn storage_tries_ref(&self) -> &B256HashMap { &self.storage_tries } } @@ -411,10 +411,7 @@ fn exclude_empty_from_pair( #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { use crate::{BranchNodeCompact, Nibbles}; - use alloy_primitives::{ - map::{HashMap, HashSet}, - B256, - }; + use alloy_primitives::map::{B256HashMap, HashMap, HashSet}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; use std::borrow::Cow; @@ -438,7 +435,7 @@ pub mod serde_bincode_compat { pub struct TrieUpdates<'a> { account_nodes: Cow<'a, HashMap>, removed_nodes: Cow<'a, HashSet>, - storage_tries: HashMap>, + storage_tries: B256HashMap>, } impl<'a> From<&'a super::TrieUpdates> for TrieUpdates<'a> { diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 99c87bf05ebf..d7263a9436ce 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -1,7 +1,7 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use alloy_primitives::{ keccak256, - map::{HashMap, HashSet}, + map::{B256HashMap, B256HashSet, HashMap}, Address, B256, }; use reth_db_api::transaction::DbTx; @@ 
-30,7 +30,7 @@ pub trait DatabaseProof<'a, TX> { fn overlay_multiproof( tx: &'a TX, input: TrieInput, - targets: HashMap>, + targets: B256HashMap, ) -> Result; } @@ -66,7 +66,7 @@ impl<'a, TX: DbTx> DatabaseProof<'a, TX> fn overlay_multiproof( tx: &'a TX, input: TrieInput, - targets: HashMap>, + targets: B256HashMap, ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 5aaf3ebe5b0f..992335896cd3 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -1,5 +1,8 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader}; -use alloy_primitives::{Address, BlockNumber, B256, U256}; +use alloy_primitives::{ + map::{AddressHashMap, B256HashMap}, + Address, BlockNumber, B256, U256, +}; use reth_db::tables; use reth_db_api::{ cursor::DbCursorRO, @@ -227,7 +230,7 @@ impl DatabaseHashedPostState for HashedPostState { } // Iterate over storage changesets and record value before first occurring storage change. - let mut storages = HashMap::>::default(); + let mut storages = AddressHashMap::>::default(); let mut storage_changesets_cursor = tx.cursor_read::()?; for entry in storage_changesets_cursor.walk_range(BlockNumberAddress((from, Address::ZERO))..)? 
diff --git a/crates/trie/db/src/witness.rs b/crates/trie/db/src/witness.rs index 54d017780ae4..c796933b90ed 100644 --- a/crates/trie/db/src/witness.rs +++ b/crates/trie/db/src/witness.rs @@ -1,5 +1,5 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use alloy_primitives::{map::HashMap, Bytes, B256}; +use alloy_primitives::{map::B256HashMap, Bytes}; use reth_db_api::transaction::DbTx; use reth_execution_errors::TrieWitnessError; use reth_trie::{ @@ -17,7 +17,7 @@ pub trait DatabaseTrieWitness<'a, TX> { tx: &'a TX, input: TrieInput, target: HashedPostState, - ) -> Result, TrieWitnessError>; + ) -> Result, TrieWitnessError>; } impl<'a, TX: DbTx> DatabaseTrieWitness<'a, TX> @@ -31,7 +31,7 @@ impl<'a, TX: DbTx> DatabaseTrieWitness<'a, TX> tx: &'a TX, input: TrieInput, target: HashedPostState, - ) -> Result, TrieWitnessError> { + ) -> Result, TrieWitnessError> { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); Self::from_tx(tx) diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 0eec133d71ea..0f1a4b2624dd 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -1,8 +1,5 @@ use crate::{root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets}; -use alloy_primitives::{ - map::{HashMap, HashSet}, - B256, -}; +use alloy_primitives::{map::HashMap, B256}; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; use reth_db::DatabaseError; @@ -18,7 +15,8 @@ use reth_trie::{ proof::StorageProof, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, walker::TrieWalker, - HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, + HashBuilder, MultiProof, MultiProofTargets, Nibbles, TrieAccount, TrieInput, + TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_common::proof::ProofRetainer; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; @@ -73,7 +71,7 @@ where /// 
Generate a state multiproof according to specified targets. pub fn multiproof( self, - targets: HashMap>, + targets: MultiProofTargets, ) -> Result { let mut tracker = ParallelTrieTracker::default(); @@ -108,8 +106,7 @@ where storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) { let view = self.view.clone(); - let target_slots: HashSet = - targets.get(&hashed_address).cloned().unwrap_or_default(); + let target_slots = targets.get(&hashed_address).cloned().unwrap_or_default(); let trie_nodes_sorted = trie_nodes_sorted.clone(); let hashed_state_sorted = hashed_state_sorted.clone(); @@ -249,7 +246,11 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{keccak256, map::DefaultHashBuilder, Address, U256}; + use alloy_primitives::{ + keccak256, + map::{B256HashSet, DefaultHashBuilder}, + Address, U256, + }; use rand::Rng; use reth_primitives::{Account, StorageEntry}; use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; @@ -300,11 +301,10 @@ mod tests { provider_rw.commit().unwrap(); } - let mut targets = - HashMap::, DefaultHashBuilder>::default(); + let mut targets = MultiProofTargets::default(); for (address, (_, storage)) in state.iter().take(10) { let hashed_address = keccak256(*address); - let mut target_slots = HashSet::::default(); + let mut target_slots = B256HashSet::default(); for (slot, _) in storage.iter().take(5) { target_slots.insert(*slot); diff --git a/crates/trie/parallel/src/storage_root_targets.rs b/crates/trie/parallel/src/storage_root_targets.rs index 9b52d49afc80..b02467b94a00 100644 --- a/crates/trie/parallel/src/storage_root_targets.rs +++ b/crates/trie/parallel/src/storage_root_targets.rs @@ -1,11 +1,10 @@ -use alloy_primitives::B256; +use alloy_primitives::{map::B256HashMap, B256}; use derive_more::{Deref, DerefMut}; use reth_trie::prefix_set::PrefixSet; -use std::collections::HashMap; /// Target accounts with corresponding prefix sets for storage root calculation. 
#[derive(Deref, DerefMut, Debug)] -pub struct StorageRootTargets(HashMap); +pub struct StorageRootTargets(B256HashMap); impl StorageRootTargets { /// Create new storage root targets from updated post state accounts diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index d8d210c1b19d..c9f5d655d3e1 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -1,6 +1,6 @@ #![allow(missing_docs, unreachable_pub)] -use alloy_primitives::{map::HashMap, B256, U256}; +use alloy_primitives::{map::B256HashMap, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use itertools::Itertools; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; @@ -193,7 +193,7 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { } } -fn generate_test_data(size: usize) -> HashMap { +fn generate_test_data(size: usize) -> B256HashMap { let mut runner = TestRunner::new(ProptestConfig::default()); proptest::collection::hash_map(any::(), any::(), size) .new_tree(&mut runner) diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 1a0f3f73648e..78ab6f0a7830 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -11,6 +11,7 @@ pub mod blinded; /// Re-export sparse trie error types. 
pub mod errors { pub use reth_execution_errors::{ - SparseStateTrieError, SparseStateTrieResult, SparseTrieError, SparseTrieResult, + SparseStateTrieError, SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieError, + SparseTrieErrorKind, SparseTrieResult, }; } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 6f5db4eda7f9..1dad2a1378c7 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -4,11 +4,13 @@ use crate::{ }; use alloy_primitives::{ hex, - map::{HashMap, HashSet}, + map::{B256HashMap, B256HashSet}, Bytes, B256, }; use alloy_rlp::{Decodable, Encodable}; -use reth_execution_errors::{SparseStateTrieError, SparseStateTrieResult, SparseTrieError}; +use reth_execution_errors::{ + SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieError, SparseTrieErrorKind, +}; use reth_primitives_traits::Account; use reth_tracing::tracing::trace; use reth_trie_common::{ @@ -24,9 +26,9 @@ pub struct SparseStateTrie, /// Sparse storage tries. - storages: HashMap>, + storages: B256HashMap>, /// Collection of revealed account and storage keys. - revealed: HashMap>, + revealed: B256HashMap, /// Flag indicating whether trie updates should be retained. retain_updates: bool, /// Reusable buffer for RLP encoding of trie accounts. @@ -204,7 +206,7 @@ impl SparseStateTrie { /// NOTE: This method does not extensively validate the proof. pub fn reveal_multiproof( &mut self, - targets: HashMap>, + targets: B256HashMap, multiproof: MultiProof, ) -> SparseStateTrieResult<()> { let account_subtree = multiproof.account_subtree.into_nodes_sorted(); @@ -278,13 +280,13 @@ impl SparseStateTrie { // Validate root node. let Some((path, node)) = proof.next() else { return Ok(None) }; if !path.is_empty() { - return Err(SparseStateTrieError::InvalidRootNode { path, node }) + return Err(SparseStateTrieErrorKind::InvalidRootNode { path, node }.into()) } // Decode root node and perform sanity check. 
let root_node = TrieNode::decode(&mut &node[..])?; if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieError::InvalidRootNode { path, node }) + return Err(SparseStateTrieErrorKind::InvalidRootNode { path, node }.into()) } Ok(Some(root_node)) @@ -364,11 +366,9 @@ where slot: Nibbles, value: Vec, ) -> SparseStateTrieResult<()> { - if let Some(storage_trie) = self.storages.get_mut(&address) { - Ok(storage_trie.update_leaf(slot, value)?) - } else { - Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) - } + let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; + storage_trie.update_leaf(slot, value)?; + Ok(()) } /// Update or remove trie account based on new account info. This method will either recompute @@ -379,10 +379,10 @@ where let nibbles = Nibbles::unpack(address); let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); - storage_trie.root().ok_or(SparseTrieError::Blind)? + storage_trie.root().ok_or(SparseTrieErrorKind::Blind)? } else if self.revealed.contains_key(&address) { trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); - let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + let state = self.state.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; // The account was revealed, either... if let Some(value) = state.get_leaf_value(&nibbles) { // ..it exists and we should take it's current storage root or... 
@@ -392,7 +392,7 @@ where EMPTY_ROOT_HASH } } else { - return Err(SparseTrieError::Blind.into()) + return Err(SparseTrieErrorKind::Blind.into()) }; if account.is_empty() && storage_root == EMPTY_ROOT_HASH { @@ -418,18 +418,20 @@ where address: B256, slot: &Nibbles, ) -> SparseStateTrieResult<()> { - if let Some(storage_trie) = self.storages.get_mut(&address) { - Ok(storage_trie.remove_leaf(slot)?) - } else { - Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) - } + let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; + storage_trie.remove_leaf(slot)?; + Ok(()) } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{b256, Bytes, U256}; + use alloy_primitives::{ + b256, + map::{HashMap, HashSet}, + Bytes, U256, + }; use alloy_rlp::EMPTY_STRING_CODE; use arbitrary::Arbitrary; use assert_matches::assert_matches; @@ -443,8 +445,8 @@ mod tests { let sparse = SparseStateTrie::default(); let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable(),), - Err(SparseStateTrieError::InvalidRootNode { .. }) + sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), + Err(SparseStateTrieErrorKind::InvalidRootNode { .. }) ); } @@ -456,8 +458,8 @@ mod tests { (Nibbles::from_nibbles([0x1]), Bytes::new()), ]; assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable(),), - Err(SparseStateTrieError::InvalidRootNode { .. }) + sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), + Err(SparseStateTrieErrorKind::InvalidRootNode { .. 
}) ); } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 3cc0e8703c4b..dd5acc639c63 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ B256, }; use alloy_rlp::Decodable; -use reth_execution_errors::{SparseTrieError, SparseTrieResult}; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind, SparseTrieResult}; use reth_tracing::tracing::trace; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, @@ -106,7 +106,7 @@ impl

SparseTrie

{ /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. pub fn wipe(&mut self) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.wipe(); Ok(()) } @@ -129,14 +129,14 @@ where { /// Update the leaf node. pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.update_leaf(path, value)?; Ok(()) } /// Remove the leaf node. pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { - let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.remove_leaf(path)?; Ok(()) } @@ -313,10 +313,11 @@ impl

RevealedSparseTrie

{ SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} // All other node types can't be handled. node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { - return Err(SparseTrieError::Reveal { + return Err(SparseTrieErrorKind::Reveal { path: entry.key().clone(), node: Box::new(node.clone()), - }) + } + .into()) } }, Entry::Vacant(entry) => { @@ -337,10 +338,11 @@ impl

RevealedSparseTrie

{ SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} // All other node types can't be handled. node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { - return Err(SparseTrieError::Reveal { + return Err(SparseTrieErrorKind::Reveal { path: entry.key().clone(), node: Box::new(node.clone()), - }) + } + .into()) } }, Entry::Vacant(entry) => { @@ -364,10 +366,11 @@ impl

RevealedSparseTrie

{ node @ (SparseNode::Empty | SparseNode::Extension { .. } | SparseNode::Branch { .. }) => { - return Err(SparseTrieError::Reveal { + return Err(SparseTrieErrorKind::Reveal { path: entry.key().clone(), node: Box::new(node.clone()), - }) + } + .into()) } }, Entry::Vacant(entry) => { @@ -389,10 +392,11 @@ impl

RevealedSparseTrie

{ Entry::Occupied(entry) => match entry.get() { // Hash node with a different hash can't be handled. SparseNode::Hash(previous_hash) if previous_hash != &hash => { - return Err(SparseTrieError::Reveal { + return Err(SparseTrieErrorKind::Reveal { path: entry.key().clone(), node: Box::new(SparseNode::Hash(hash)), - }) + } + .into()) } _ => {} }, @@ -413,9 +417,9 @@ impl

RevealedSparseTrie

{ while let Some(node) = self.nodes.remove(¤t) { match &node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } SparseNode::Leaf { key: _key, .. } => { // Leaf node is always the one that we're deleting, and no other leaf nodes can @@ -603,13 +607,13 @@ impl

RevealedSparseTrie

{ } SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), false, SparseNodeType::Hash), SparseNode::Leaf { key, hash } => { - self.rlp_buf.clear(); let mut path = path.clone(); path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { (RlpNode::word_rlp(&hash), false, SparseNodeType::Leaf) } else { let value = self.values.get(&path).unwrap(); + self.rlp_buf.clear(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); (rlp_node, true, SparseNodeType::Leaf) @@ -825,8 +829,8 @@ where *node = SparseNode::new_leaf(path); break } - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + &mut SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } SparseNode::Leaf { key: current_key, .. } => { current.extend_from_slice_unchecked(current_key); @@ -844,6 +848,7 @@ where *node = SparseNode::new_ext(new_ext_key); // create a branch node and corresponding leaves + self.nodes.reserve(3); self.nodes.insert( current.slice(..common), SparseNode::new_split_branch(current[common], path[common]), @@ -887,6 +892,7 @@ where // create state mask for new branch node // NOTE: this might overwrite the current extension node + self.nodes.reserve(3); let branch = SparseNode::new_split_branch(current[common], path[common]); self.nodes.insert(current.slice(..common), branch); @@ -922,9 +928,9 @@ where /// Remove leaf node from the trie. pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { if self.values.remove(path).is_none() { - if let Some(SparseNode::Hash(hash)) = self.nodes.get(path) { + if let Some(&SparseNode::Hash(hash)) = self.nodes.get(path) { // Leaf is present in the trie, but it's blinded. 
- return Err(SparseTrieError::BlindedNode { path: path.clone(), hash: *hash }) + return Err(SparseTrieErrorKind::BlindedNode { path: path.clone(), hash }.into()) } // Leaf is not present in the trie. @@ -962,9 +968,9 @@ where let removed_path = removed_node.path; let new_node = match &removed_node.node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: removed_path, hash }.into()) } SparseNode::Leaf { .. } => { unreachable!("we already popped the leaf node") @@ -973,12 +979,11 @@ where // If the node is an extension node, we need to look at its child to see if we // need to merge them. match &child.node { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { - path: child.path, - hash: *hash, - }) + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err( + SparseTrieErrorKind::BlindedNode { path: child.path, hash }.into() + ) } // For a leaf node, we collapse the extension node into a leaf node, // extending the key. 
While it's impossible to encounter an extension node @@ -1041,12 +1046,13 @@ where let mut delete_child = false; let new_node = match child { - SparseNode::Empty => return Err(SparseTrieError::Blind), - SparseNode::Hash(hash) => { - return Err(SparseTrieError::BlindedNode { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: child_path, - hash: *hash, - }) + hash, + } + .into()) } // If the only child is a leaf node, we downgrade the branch node into a // leaf node, prepending the nibble to the key, and delete the old @@ -1273,7 +1279,10 @@ impl SparseTrieUpdates { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{map::HashSet, U256}; + use alloy_primitives::{ + map::{B256HashSet, HashSet}, + U256, + }; use alloy_rlp::Encodable; use assert_matches::assert_matches; use itertools::Itertools; @@ -1316,7 +1325,7 @@ mod tests { /// Returns the state root and the retained proof nodes. 
fn run_hash_builder( state: impl IntoIterator + Clone, - destroyed_accounts: HashSet, + destroyed_accounts: B256HashSet, proof_targets: impl IntoIterator, ) -> (B256, TrieUpdates, ProofNodes, HashMap) { let mut account_rlp = Vec::new(); @@ -1866,8 +1875,8 @@ mod tests { // Removing a blinded leaf should result in an error assert_matches!( - sparse.remove_leaf(&Nibbles::from_nibbles([0x0])), - Err(SparseTrieError::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) + sparse.remove_leaf(&Nibbles::from_nibbles([0x0])).map_err(|e| e.into_kind()), + Err(SparseTrieErrorKind::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) ); } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index e0689d450873..a4ab1fa52eaf 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -3,7 +3,7 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted, }; -use alloy_primitives::{map::HashSet, B256, U256}; +use alloy_primitives::{map::B256HashSet, B256, U256}; use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; @@ -48,7 +48,7 @@ pub struct HashedPostStateAccountCursor<'a, C> { /// Forward-only in-memory cursor over accounts. post_state_cursor: ForwardInMemoryCursor<'a, B256, Account>, /// Reference to the collection of account keys that were destroyed. - destroyed_accounts: &'a HashSet, + destroyed_accounts: &'a B256HashSet, /// The last hashed account that was returned by the cursor. /// De facto, this is a current cursor position. last_account: Option, @@ -182,7 +182,7 @@ pub struct HashedPostStateStorageCursor<'a, C> { /// Forward-only in-memory cursor over non zero-valued account storage slots. 
post_state_cursor: Option>, /// Reference to the collection of storage slot keys that were cleared. - cleared_slots: Option<&'a HashSet>, + cleared_slots: Option<&'a B256HashSet>, /// Flag indicating whether database storage was wiped. storage_wiped: bool, /// The last slot that has been returned by the cursor. diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs index a7b60bc6b270..1383453f344d 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/blinded.rs @@ -4,7 +4,7 @@ use alloy_primitives::{ map::{HashMap, HashSet}, Bytes, B256, }; -use reth_execution_errors::SparseTrieError; +use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles}; use reth_trie_sparse::blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory}; use std::sync::Arc; @@ -92,7 +92,7 @@ where Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) .multiproof(targets) - .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; Ok(proof.account_subtree.into_inner().remove(&path)) } @@ -141,7 +141,7 @@ where ) .with_prefix_set_mut(storage_prefix_set) .storage_multiproof(targets) - .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; Ok(proof.subtree.into_inner().remove(&path)) } diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 8e3d0aec2ab1..1414d4a3444c 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -8,7 +8,7 @@ use crate::{ }; use alloy_primitives::{ keccak256, - map::{HashMap, HashSet}, + map::{B256HashMap, B256HashSet, HashMap, HashSet}, Address, B256, }; use alloy_rlp::{BufMut, Encodable}; @@ -103,7 +103,7 @@ where /// Generate a state 
multiproof according to specified targets. pub fn multiproof( mut self, - mut targets: HashMap>, + mut targets: B256HashMap, ) -> Result { let hashed_account_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; @@ -121,7 +121,7 @@ where // Initialize all storage multiproofs as empty. // Storage multiproofs for non empty tries will be overwritten if necessary. - let mut storages: HashMap<_, _> = + let mut storages: B256HashMap<_> = targets.keys().map(|key| (*key, StorageMultiProof::empty())).collect(); let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); @@ -263,7 +263,7 @@ where /// Generate storage proof. pub fn storage_multiproof( mut self, - targets: HashSet, + targets: B256HashSet, ) -> Result { let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index cc5c9d15eac9..510b914ceaf9 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -4,7 +4,7 @@ use crate::{ }; use alloy_primitives::{ keccak256, - map::{hash_map, HashMap, HashSet}, + map::{hash_map, B256HashMap, B256HashSet, HashMap, HashSet}, Address, B256, U256, }; use itertools::Itertools; @@ -18,9 +18,9 @@ use std::borrow::Cow; #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct HashedPostState { /// Mapping of hashed address to account info, `None` if destroyed. - pub accounts: HashMap>, + pub accounts: B256HashMap>, /// Mapping of hashed address to hashed storage. - pub storages: HashMap, + pub storages: B256HashMap, } impl HashedPostState { @@ -210,7 +210,7 @@ pub struct HashedStorage { /// Flag indicating whether the storage was wiped or not. pub wiped: bool, /// Mapping of hashed storage slot to storage value. 
- pub storage: HashMap, + pub storage: B256HashMap, } impl HashedStorage { @@ -281,14 +281,14 @@ pub struct HashedPostStateSorted { /// Updated state of accounts. pub(crate) accounts: HashedAccountsSorted, /// Map of hashed addresses to hashed storage. - pub(crate) storages: HashMap, + pub(crate) storages: B256HashMap, } impl HashedPostStateSorted { /// Create new instance of [`HashedPostStateSorted`] pub const fn new( accounts: HashedAccountsSorted, - storages: HashMap, + storages: B256HashMap, ) -> Self { Self { accounts, storages } } @@ -299,7 +299,7 @@ impl HashedPostStateSorted { } /// Returns reference to hashed account storages. - pub const fn account_storages(&self) -> &HashMap { + pub const fn account_storages(&self) -> &B256HashMap { &self.storages } } @@ -310,7 +310,7 @@ pub struct HashedAccountsSorted { /// Sorted collection of hashed addresses and their account info. pub(crate) accounts: Vec<(B256, Account)>, /// Set of destroyed account keys. - pub(crate) destroyed_accounts: HashSet, + pub(crate) destroyed_accounts: B256HashSet, } impl HashedAccountsSorted { @@ -330,7 +330,7 @@ pub struct HashedStorageSorted { /// Sorted hashed storage slots with non-zero value. pub(crate) non_zero_valued_slots: Vec<(B256, U256)>, /// Slots that have been zero valued. - pub(crate) zero_valued_slots: HashSet, + pub(crate) zero_valued_slots: B256HashSet, /// Flag indicating whether the storage was wiped or not. 
pub(crate) wiped: bool, } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index e8f5b8741a51..5e56cbf21c71 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -7,12 +7,13 @@ use crate::{ }; use alloy_primitives::{ keccak256, - map::{Entry, HashMap, HashSet}, + map::{B256HashMap, B256HashSet, Entry, HashMap}, Bytes, B256, }; use itertools::Itertools; use reth_execution_errors::{ - SparseStateTrieError, SparseTrieError, StateProofError, TrieWitnessError, + SparseStateTrieError, SparseStateTrieErrorKind, SparseTrieError, SparseTrieErrorKind, + StateProofError, TrieWitnessError, }; use reth_trie_common::Nibbles; use reth_trie_sparse::{ @@ -31,7 +32,7 @@ pub struct TrieWitness { /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, /// Recorded witness. - witness: HashMap, + witness: B256HashMap, } impl TrieWitness { @@ -86,7 +87,7 @@ where pub fn compute( mut self, state: HashedPostState, - ) -> Result, TrieWitnessError> { + ) -> Result, TrieWitnessError> { if state.is_empty() { return Ok(self.witness) } @@ -127,7 +128,7 @@ where let storage = state.storages.get(&hashed_address); let storage_trie = sparse_trie .storage_trie_mut(&hashed_address) - .ok_or(SparseStateTrieError::Sparse(SparseTrieError::Blind))?; + .ok_or(SparseStateTrieErrorKind::Sparse(SparseTrieErrorKind::Blind))?; for hashed_slot in hashed_slots.into_iter().sorted_unstable() { let storage_nibbles = Nibbles::unpack(hashed_slot); let maybe_leaf_value = storage @@ -138,11 +139,11 @@ where if let Some(value) = maybe_leaf_value { storage_trie .update_leaf(storage_nibbles, value) - .map_err(SparseStateTrieError::Sparse)?; + .map_err(SparseStateTrieError::from)?; } else { storage_trie .remove_leaf(&storage_nibbles) - .map_err(SparseStateTrieError::Sparse)?; + .map_err(SparseStateTrieError::from)?; } } @@ -170,13 +171,13 @@ where fn get_proof_targets( &self, state: &HashedPostState, - ) -> Result>, StateProofError> { - let 
mut proof_targets = HashMap::default(); + ) -> Result, StateProofError> { + let mut proof_targets = B256HashMap::default(); for hashed_address in state.accounts.keys() { - proof_targets.insert(*hashed_address, HashSet::default()); + proof_targets.insert(*hashed_address, B256HashSet::default()); } for (hashed_address, storage) in &state.storages { - let mut storage_keys = storage.storage.keys().copied().collect::>(); + let mut storage_keys = storage.storage.keys().copied().collect::(); if storage.wiped { // storage for this account was destroyed, gather all slots from the current state let mut storage_cursor = @@ -251,7 +252,9 @@ where fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error> { let maybe_node = self.provider.blinded_node(path)?; if let Some(node) = &maybe_node { - self.tx.send(node.clone()).map_err(|error| SparseTrieError::Other(Box::new(error)))?; + self.tx + .send(node.clone()) + .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; } Ok(maybe_node) } From 9be5a78146f3db1b570b84127ec8c9e5cbd28d84 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 08:11:57 +0100 Subject: [PATCH 61/70] chore: introduce receipts generic (#13276) --- crates/net/eth-wire-types/src/receipts.rs | 25 ++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 2bad4287f2e1..14493505df47 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -1,5 +1,6 @@ //! Implements the `GetReceipts` and `Receipts` message types. +use alloy_consensus::{RlpDecodableReceipt, RlpEncodableReceipt}; use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::add_arbitrary_tests; @@ -17,15 +18,33 @@ pub struct GetReceipts( /// The response to [`GetReceipts`], containing receipt lists that correspond to each block /// requested. 
-#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp)] -pub struct Receipts( +pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request. - pub Vec>>, + pub Vec>>, ); +impl alloy_rlp::Encodable for Receipts { + #[inline] + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.0.encode(out) + } + #[inline] + fn length(&self) -> usize { + self.0.length() + } +} + +impl alloy_rlp::Decodable for Receipts { + #[inline] + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + alloy_rlp::Decodable::decode(buf).map(Self) + } +} + #[cfg(test)] mod tests { use crate::{message::RequestPair, GetReceipts, Receipts}; From b424ad36dc23de64d4ffd827aacacd705d48ecd5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 08:35:11 +0100 Subject: [PATCH 62/70] chore: reduce revm scope in txpool (#13274) --- Cargo.lock | 5 +++-- Cargo.toml | 1 + bin/reth/Cargo.toml | 2 +- bin/reth/src/commands/debug_cmd/build_block.rs | 11 +++++------ crates/node/builder/Cargo.toml | 2 +- crates/node/builder/src/builder/mod.rs | 2 +- crates/transaction-pool/Cargo.toml | 12 +++++++----- crates/transaction-pool/src/validate/eth.rs | 9 +++------ 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 27a33b69f853..39900bf5e639 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8118,6 +8118,7 @@ dependencies = [ name = "reth-node-builder" version = "1.1.3" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types", "aquamarine", @@ -8170,7 +8171,6 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "revm-primitives", "secp256k1", "tempfile", "tokio", @@ -9497,7 +9497,8 @@ dependencies = [ "reth-storage-api", "reth-tasks", 
"reth-tracing", - "revm", + "revm-interpreter", + "revm-primitives", "rustc-hash 2.1.0", "schnellru", "serde", diff --git a/Cargo.toml b/Cargo.toml index 20f7ba19f60e..ee6282b21697 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -429,6 +429,7 @@ reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-feat revm = { version = "18.0.0", features = ["std"], default-features = false } revm-inspectors = "0.13.0" revm-primitives = { version = "14.0.0", default-features = false } +revm-interpreter = { version = "14.0.0", default-features = false } # eth alloy-chains = { version = "0.1.32", default-features = false } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index cf9c53261b45..fb86a8ced2b3 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -68,7 +68,7 @@ reth-engine-util.workspace = true reth-prune.workspace = true # crypto -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } alloy-rlp.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-consensus.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 0e4d3f7188a9..41a9d9f4f570 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,6 +1,9 @@ //! Command for debugging block building. 
use alloy_consensus::TxEip4844; -use alloy_eips::{eip2718::Encodable2718, eip4844::BlobTransactionSidecar}; +use alloy_eips::{ + eip2718::Encodable2718, + eip4844::{env_settings::EnvKzgSettings, BlobTransactionSidecar}, +}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::Decodable; use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -33,11 +36,7 @@ use reth_provider::{ BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::{ - cached::CachedReads, - database::StateProviderDatabase, - primitives::{EnvKzgSettings, KzgSettings}, -}; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::KzgSettings}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 1a0b5bad0a13..51409a6f84c4 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -61,7 +61,7 @@ reth-transaction-pool.workspace = true ## ethereum alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } -revm-primitives.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } ## async futures.workspace = true diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index e38882fa5d8a..98c831f2f5a2 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -9,6 +9,7 @@ use crate::{ rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; +use alloy_eips::eip4844::env_settings::EnvKzgSettings; use futures::Future; use reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; @@ -38,7 +39,6 @@ use reth_provider::{ }; use 
reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool}; -use revm_primitives::EnvKzgSettings; use secp256k1::SecretKey; use std::sync::Arc; use tracing::{info, trace, warn}; diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 214633188167..f2586059967e 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -23,10 +23,11 @@ reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true reth-tasks.workspace = true -revm.workspace = true +revm-primitives.workspace = true +revm-interpreter.workspace = true # ethereum -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-consensus.workspace = true @@ -85,9 +86,10 @@ serde = [ "bitflags/serde", "parking_lot/serde", "rand?/serde", - "revm/serde", "smallvec/serde", "reth-primitives-traits/serde", + "revm-interpreter/serde", + "revm-primitives/serde" ] test-utils = [ "rand", @@ -97,7 +99,6 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-provider/test-utils", - "revm/test-utils", "reth-primitives-traits/test-utils", ] arbitrary = [ @@ -110,9 +111,10 @@ arbitrary = [ "alloy-eips/arbitrary", "alloy-primitives/arbitrary", "bitflags/arbitrary", - "revm/arbitrary", "reth-primitives-traits/arbitrary", "smallvec/arbitrary", + "revm-interpreter/arbitrary", + "revm-primitives/arbitrary" ] [[bench]] diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 998de5ffb510..bc26014f4083 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -18,16 +18,12 @@ use alloy_consensus::{ }, BlockHeader, }; -use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; +use alloy_eips::eip4844::{env_settings::EnvKzgSettings, MAX_BLOBS_PER_BLOCK}; use 
reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_primitives::{InvalidTransactionError, SealedBlock}; use reth_primitives_traits::GotExpected; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; -use revm::{ - interpreter::gas::validate_initial_tx_gas, - primitives::{EnvKzgSettings, SpecId}, -}; use std::{ marker::PhantomData, sync::{atomic::AtomicBool, Arc}, @@ -807,6 +803,7 @@ pub fn ensure_intrinsic_gas( transaction: &T, fork_tracker: &ForkTracker, ) -> Result<(), InvalidPoolTransactionError> { + use revm_primitives::SpecId; let spec_id = if fork_tracker.is_prague_activated() { SpecId::PRAGUE } else if fork_tracker.is_shanghai_activated() { @@ -815,7 +812,7 @@ pub fn ensure_intrinsic_gas( SpecId::MERGE }; - let gas_after_merge = validate_initial_tx_gas( + let gas_after_merge = revm_interpreter::gas::validate_initial_tx_gas( spec_id, transaction.input(), transaction.is_create(), From d345ffc58caf1b4cf490ba4b125764f2e44f417d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 09:53:48 +0100 Subject: [PATCH 63/70] fix: fromstr impl for miner variant (#13289) --- crates/rpc/rpc-server-types/src/module.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 3eb34b34a7f2..7dc929936116 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -320,6 +320,7 @@ impl FromStr for RethRpcModule { "reth" => Self::Reth, "ots" => Self::Ots, "flashbots" => Self::Flashbots, + "miner" => Self::Miner, _ => return Err(ParseError::VariantNotFound), }) } From 6b7bf2a0adb460e4d7e7835b7a07a9d692ff0d91 Mon Sep 17 00:00:00 2001 From: faheelsattar Date: Wed, 11 Dec 2024 14:36:27 +0500 Subject: [PATCH 64/70] refactor: use constructor functions (#13255) --- crates/ethereum/evm/src/execute.rs | 122 +++++++++++------------ crates/rpc/rpc-eth-types/src/simulate.rs | 2 +- 2 files changed, 62 
insertions(+), 62 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 6cbbb69c906b..99818ef548fd 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -421,8 +421,8 @@ mod tests { let err = executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { + &BlockWithSenders::new_unchecked( + Block { header: header.clone(), body: BlockBody { transactions: vec![], @@ -430,8 +430,8 @@ mod tests { withdrawals: None, }, }, - senders: vec![], - }, + vec![], + ), U256::ZERO, ) .into(), @@ -452,8 +452,8 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { + &BlockWithSenders::new_unchecked( + Block { header: header.clone(), body: BlockBody { transactions: vec![], @@ -461,8 +461,8 @@ mod tests { withdrawals: None, }, }, - senders: vec![], - }, + vec![], + ), U256::ZERO, ) .into(), @@ -522,8 +522,8 @@ mod tests { .batch_executor(StateProviderDatabase::new(&db)) .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { + &BlockWithSenders::new_unchecked( + Block { header, body: BlockBody { transactions: vec![], @@ -531,8 +531,8 @@ mod tests { withdrawals: None, }, }, - senders: vec![], - }, + vec![], + ), U256::ZERO, ) .into(), @@ -576,8 +576,8 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { + &BlockWithSenders::new_unchecked( + Block { header, body: BlockBody { transactions: vec![], @@ -585,8 +585,8 @@ mod tests { withdrawals: None, }, }, - senders: vec![], - }, + vec![], + ), U256::ZERO, ) .into(), @@ -622,10 +622,10 @@ mod tests { let _err = executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header: header.clone(), body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header: header.clone(), body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -643,10 +643,10 @@ mod tests { executor .execute_and_verify_one( 
( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -697,10 +697,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header: header.clone(), body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header: header.clone(), body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -773,10 +773,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -816,10 +816,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -866,10 +866,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -922,10 +922,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -970,10 +970,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -1005,10 
+1005,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -1043,10 +1043,10 @@ mod tests { executor .execute_and_verify_one( ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, + &BlockWithSenders::new_unchecked( + Block { header, body: Default::default() }, + vec![], + ), U256::ZERO, ) .into(), @@ -1261,8 +1261,8 @@ mod tests { let header = Header { timestamp: 1, number: 1, ..Header::default() }; - let block = BlockWithSenders { - block: Block { + let block = &BlockWithSenders::new_unchecked( + Block { header, body: BlockBody { transactions: vec![], @@ -1270,8 +1270,8 @@ mod tests { withdrawals: Some(vec![withdrawal].into()), }, }, - senders: vec![], - }; + vec![], + ); let provider = executor_provider(chain_spec); let executor = provider.executor(StateProviderDatabase::new(&db)); @@ -1280,7 +1280,7 @@ mod tests { let tx_clone = tx.clone(); let _output = executor - .execute_with_state_hook((&block, U256::ZERO).into(), move |state: &EvmState| { + .execute_with_state_hook((block, U256::ZERO).into(), move |state: &EvmState| { if let Some(account) = state.get(&withdrawal_recipient) { let _ = tx_clone.send(account.info.balance); } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a6ea5c4b7881..f18232b8952e 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -204,7 +204,7 @@ where calls.push(call); } - let block = BlockWithSenders { block, senders }; + let block = BlockWithSenders::new_unchecked(block, senders); let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; From b34f23d880f786065654b122e18dc59dc93e0199 Mon Sep 17 00:00:00 2001 From: Tuan 
Tran Date: Wed, 11 Dec 2024 16:36:37 +0700 Subject: [PATCH 65/70] chore: Generic data prims EngineSyncController (#13037) Co-authored-by: Matthias Seitz --- crates/consensus/beacon/src/engine/sync.rs | 27 ++++++++++++++-------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 861aeebf1eb8..735441b2e2cf 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -9,8 +9,9 @@ use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - EthBlockClient, + BlockClient, }; +use reth_node_types::{BodyTy, HeaderTy}; use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; @@ -35,7 +36,7 @@ use tracing::trace; pub(crate) struct EngineSyncController where N: ProviderNodeTypes, - Client: EthBlockClient, + Client: BlockClient, { /// A downloader that can download full blocks from the network. full_block_client: FullBlockClient, @@ -51,10 +52,10 @@ where /// In-flight full block _range_ requests in progress. inflight_block_range_requests: Vec>, /// Sender for engine events. - event_sender: EventSender, + event_sender: EventSender>, /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for /// ordering. This means the blocks will be popped from the heap with ascending block numbers. - range_buffered_blocks: BinaryHeap>, + range_buffered_blocks: BinaryHeap, BodyTy>>>, /// Max block after which the consensus engine would terminate the sync. Used for debugging /// purposes. 
max_block: Option, @@ -65,7 +66,7 @@ where impl EngineSyncController where N: ProviderNodeTypes, - Client: EthBlockClient + 'static, + Client: BlockClient, { /// Create a new instance pub(crate) fn new( @@ -74,7 +75,7 @@ where pipeline_task_spawner: Box, max_block: Option, chain_spec: Arc, - event_sender: EventSender, + event_sender: EventSender>, ) -> Self { Self { full_block_client: FullBlockClient::new( @@ -92,7 +93,13 @@ where metrics: EngineSyncMetrics::default(), } } +} +impl EngineSyncController +where + N: ProviderNodeTypes, + Client: BlockClient

, Body = BodyTy> + 'static, +{ /// Sets the metrics for the active downloads fn update_block_download_metrics(&self) { self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64); @@ -234,7 +241,7 @@ where /// Advances the pipeline state. /// /// This checks for the result in the channel, or returns pending if the pipeline is idle. - fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll> { let res = match self.pipeline_state { PipelineState::Idle(_) => return Poll::Pending, PipelineState::Running(ref mut fut) => { @@ -259,7 +266,7 @@ where /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to /// run continuously. - fn try_spawn_pipeline(&mut self) -> Option { + fn try_spawn_pipeline(&mut self) -> Option> { match &mut self.pipeline_state { PipelineState::Idle(pipeline) => { let target = self.pending_pipeline_target.take()?; @@ -286,7 +293,7 @@ where } /// Advances the sync process. 
- pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { // try to spawn a pipeline if a target is set if let Some(event) = self.try_spawn_pipeline() { return Poll::Ready(event) @@ -423,7 +430,7 @@ mod tests { use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; - use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; + use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient, EthBlockClient}; use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, From 2dda8a9d130227cea743a69dd54b4fbb2e2283dc Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 10:37:02 +0100 Subject: [PATCH 66/70] chore: cut down on tx_type usage (#13287) --- crates/net/network/src/transactions/mod.rs | 2 +- crates/optimism/primitives/src/transaction/signed.rs | 9 +++++---- crates/primitives/src/transaction/mod.rs | 3 +-- crates/transaction-pool/src/blobstore/tracker.rs | 2 +- crates/transaction-pool/src/traits.rs | 6 +++--- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 83674c96c511..4ed352353b72 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1717,7 +1717,7 @@ impl PooledTransactionsHashesBuilder { Self::Eth68(msg) => { msg.hashes.push(*tx.tx_hash()); msg.sizes.push(tx.size); - msg.types.push(tx.transaction.tx_type().into()); + msg.types.push(tx.transaction.ty()); } } } diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index ee72c65eb5ec..41a63be3bf2e 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -54,7 
+54,7 @@ impl OpTransactionSigned { /// Calculates hash of given transaction and signature and returns new instance. pub fn new(transaction: OpTypedTransaction, signature: Signature) -> Self { let signed_tx = Self::new_unhashed(transaction, signature); - if !matches!(signed_tx.tx_type(), OpTxType::Deposit) { + if signed_tx.ty() != OpTxType::Deposit { signed_tx.hash.get_or_init(|| signed_tx.recalculate_hash()); } @@ -246,9 +246,10 @@ impl alloy_rlp::Decodable for OpTransactionSigned { impl Encodable2718 for OpTransactionSigned { fn type_flag(&self) -> Option { - match self.tx_type() { - op_alloy_consensus::OpTxType::Legacy => None, - tx_type => Some(tx_type as u8), + if Typed2718::is_legacy(self) { + None + } else { + Some(self.ty()) } } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 4732ba8024c3..13c71ce2094d 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1921,13 +1921,12 @@ mod tests { // Test vector from https://sepolia.etherscan.io/tx/0x9a22ccb0029bc8b0ddd073be1a1d923b7ae2b2ea52100bae0db4424f9107e9c0 // Blobscan: https://sepolia.blobscan.com/tx/0x9a22ccb0029bc8b0ddd073be1a1d923b7ae2b2ea52100bae0db4424f9107e9c0 fn test_decode_recover_sepolia_4844_tx() { - use crate::TxType; use alloy_primitives::{address, b256}; // https://sepolia.etherscan.io/getRawTx?tx=0x9a22ccb0029bc8b0ddd073be1a1d923b7ae2b2ea52100bae0db4424f9107e9c0 let raw_tx = 
alloy_primitives::hex::decode("0x03f9011d83aa36a7820fa28477359400852e90edd0008252089411e9ca82a3a762b4b5bd264d4173a242e7a770648080c08504a817c800f8a5a0012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921aa00152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4a0013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7a001148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1a0011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e654901a0c8de4cced43169f9aa3d36506363b2d2c44f6c49fc1fd91ea114c86f3757077ea01e11fdd0d1934eda0492606ee0bb80a7bf8f35cc5f86ec60fe5031ba48bfd544").unwrap(); let decoded = TransactionSigned::decode_2718(&mut raw_tx.as_slice()).unwrap(); - assert_eq!(decoded.tx_type(), TxType::Eip4844); + assert!(decoded.is_eip4844()); let from = decoded.recover_signer(); assert_eq!(from, Some(address!("A83C816D4f9b2783761a22BA6FADB0eB0606D7B2"))); diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 817114fcf259..c359dcc7cf00 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -49,7 +49,7 @@ impl BlobStoreCanonTracker { .body .transactions() .iter() - .filter(|tx| tx.tx_type().is_eip4844()) + .filter(|tx| tx.is_eip4844()) .map(|tx| tx.trie_hash()); (*num, iter) }); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index a0d4d40983e4..4dfa0bcc08fc 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, - Transaction as _, + Transaction as _, Typed2718, }; use alloy_eips::{ eip2718::Encodable2718, @@ -1368,7 +1368,7 @@ impl PoolTransaction for EthPooledTransaction { /// Returns the transaction type fn tx_type(&self) -> u8 { - self.transaction.tx_type().into() + self.transaction.ty() } /// 
Returns the length of the rlp encoded object @@ -1444,7 +1444,7 @@ impl TryFrom for EthPooledTransaction { fn try_from(tx: RecoveredTx) -> Result { // ensure we can handle the transaction type and its format - match tx.tx_type() as u8 { + match tx.ty() { 0..=EIP1559_TX_TYPE_ID | EIP7702_TX_TYPE_ID => { // supported } From fa340b5321dde24f13fd2f8927e0fc88116a7d47 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 11:08:39 +0100 Subject: [PATCH 67/70] chore: bump version 1.1.4 (#13291) --- Cargo.lock | 244 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 39900bf5e639..bdfbd5680f4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2647,7 +2647,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5532,7 +5532,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.1.3" +version = "1.1.4" dependencies = [ "clap", "reth-cli-util", @@ -6479,7 +6479,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6552,7 +6552,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6580,7 +6580,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6636,7 +6636,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6672,7 +6672,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6710,7 +6710,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.3" 
+version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6725,7 +6725,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6754,7 +6754,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -6775,7 +6775,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-genesis", "clap", @@ -6788,7 +6788,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.3" +version = "1.1.4" dependencies = [ "ahash", "alloy-consensus", @@ -6856,7 +6856,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-tasks", "tokio", @@ -6865,7 +6865,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6883,7 +6883,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6906,7 +6906,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.1.3" +version = "1.1.4" dependencies = [ "convert_case", "proc-macro2", @@ -6917,7 +6917,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "eyre", @@ -6933,7 +6933,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6946,7 +6946,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6963,7 +6963,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.1.3" +version = "1.1.4" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -6986,7 +6986,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7027,7 +7027,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7056,7 +7056,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7085,7 +7085,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7102,7 +7102,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7129,7 +7129,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7153,7 +7153,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7181,7 +7181,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7220,7 +7220,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7268,7 +7268,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.1.3" +version = "1.1.4" dependencies = [ "aes", "alloy-primitives", @@ -7298,7 +7298,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7330,7 +7330,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", 
"alloy-primitives", @@ -7350,7 +7350,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.1.3" +version = "1.1.4" dependencies = [ "futures", "pin-project", @@ -7378,7 +7378,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7434,7 +7434,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7466,7 +7466,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7478,7 +7478,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-eips", @@ -7515,7 +7515,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7540,7 +7540,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.1.3" +version = "1.1.4" dependencies = [ "clap", "eyre", @@ -7551,7 +7551,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7566,7 +7566,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7586,7 +7586,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7606,7 +7606,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7631,11 +7631,11 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.1.3" +version = "1.1.4" [[package]] 
name = "reth-etl" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "rayon", @@ -7645,7 +7645,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7673,7 +7673,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7696,7 +7696,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7711,7 +7711,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7731,7 +7731,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7774,7 +7774,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "eyre", @@ -7807,7 +7807,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7824,7 +7824,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "serde", "serde_json", @@ -7833,7 +7833,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7859,7 +7859,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.3" +version = "1.1.4" dependencies = [ "async-trait", "bytes", @@ -7881,7 +7881,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.3" +version = "1.1.4" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7902,7 +7902,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.1.3" +version = "1.1.4" 
dependencies = [ "bindgen", "cc", @@ -7910,7 +7910,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.3" +version = "1.1.4" dependencies = [ "futures", "metrics", @@ -7921,14 +7921,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.3" +version = "1.1.4" dependencies = [ "futures-util", "if-addrs", @@ -7942,7 +7942,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8005,7 +8005,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8027,7 +8027,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8049,7 +8049,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8065,7 +8065,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -8078,7 +8078,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.3" +version = "1.1.4" dependencies = [ "anyhow", "bincode", @@ -8096,7 +8096,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8116,7 +8116,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8180,7 +8180,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8231,7 +8231,7 @@ dependencies = [ [[package]] 
name = "reth-node-ethereum" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8280,7 +8280,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8303,7 +8303,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.1.3" +version = "1.1.4" dependencies = [ "eyre", "http", @@ -8326,7 +8326,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8337,7 +8337,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8357,7 +8357,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8408,7 +8408,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8427,7 +8427,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8457,7 +8457,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8468,7 +8468,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8522,7 +8522,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8558,7 +8558,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8582,7 +8582,7 @@ dependencies 
= [ [[package]] name = "reth-optimism-rpc" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8627,7 +8627,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.1.3" +version = "1.1.4" dependencies = [ "reth-codecs", "reth-db-api", @@ -8638,7 +8638,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8660,7 +8660,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "async-trait", @@ -8673,7 +8673,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8691,7 +8691,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8700,7 +8700,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8710,7 +8710,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8757,7 +8757,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8785,7 +8785,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8835,7 +8835,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8867,7 +8867,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", 
"arbitrary", @@ -8887,7 +8887,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8905,7 +8905,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8977,7 +8977,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -9001,7 +9001,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9020,7 +9020,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9070,7 +9070,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9108,7 +9108,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9151,7 +9151,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9193,7 +9193,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "http", @@ -9210,7 +9210,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9225,7 +9225,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9242,7 +9242,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", 
"alloy-eips", @@ -9293,7 +9293,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9322,7 +9322,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -9339,7 +9339,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "assert_matches", @@ -9363,7 +9363,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "clap", @@ -9374,7 +9374,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9398,7 +9398,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9411,7 +9411,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.3" +version = "1.1.4" dependencies = [ "auto_impl", "dyn-clone", @@ -9428,7 +9428,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9442,7 +9442,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.1.3" +version = "1.1.4" dependencies = [ "tokio", "tokio-stream", @@ -9451,7 +9451,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.3" +version = "1.1.4" dependencies = [ "clap", "eyre", @@ -9465,7 +9465,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9513,7 +9513,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9542,7 +9542,7 @@ dependencies = 
[ [[package]] name = "reth-trie-common" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9572,7 +9572,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9601,7 +9601,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9628,7 +9628,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.1.3" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9652,7 +9652,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.1.3" +version = "1.1.4" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index ee6282b21697..3d652c5908eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.3" +version = "1.1.4" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" From f2141925b0bbe0ad19e66110419f57325bcbae9c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 12:48:39 +0100 Subject: [PATCH 68/70] chore: rm associated type (#13292) --- crates/net/network/src/transactions/mod.rs | 6 +-- .../primitives/src/transaction/signed.rs | 2 - crates/primitives-traits/src/lib.rs | 1 - crates/primitives-traits/src/node.rs | 8 +-- .../primitives-traits/src/transaction/mod.rs | 1 - .../src/transaction/signed.rs | 20 +++---- .../src/transaction/tx_type.rs | 52 ------------------- crates/primitives/src/lib.rs | 1 - crates/primitives/src/transaction/mod.rs | 2 - crates/primitives/src/transaction/pooled.rs | 4 +- crates/primitives/src/transaction/tx_type.rs | 10 ---- 11 files changed, 17 insertions(+), 90 deletions(-) delete mode 100644 crates/primitives-traits/src/transaction/tx_type.rs diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 
4ed352353b72..7e87736cc49b 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -50,7 +50,7 @@ use reth_network_p2p::{ use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, TransactionSigned}; -use reth_primitives_traits::{SignedTransaction, TxType}; +use reth_primitives_traits::SignedTransaction; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, @@ -1641,7 +1641,7 @@ impl FullTransactionsBuilder { /// /// If the transaction is unsuitable for broadcast or would exceed the softlimit, it is appended /// to list of pooled transactions, (e.g. 4844 transactions). - /// See also [`TxType::is_broadcastable_in_full`]. + /// See also [`SignedTransaction::is_broadcastable_in_full`]. fn push(&mut self, transaction: &PropagateTransaction) { // Do not send full 4844 transaction hashes to peers. // @@ -1651,7 +1651,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. 
// // From: - if !transaction.transaction.tx_type().is_broadcastable_in_full() { + if !transaction.transaction.is_broadcastable_in_full() { self.pooled.push(transaction); return } diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 41a63be3bf2e..5acf9b8a8ca7 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -70,8 +70,6 @@ impl OpTransactionSigned { } impl SignedTransaction for OpTransactionSigned { - type Type = OpTxType; - fn tx_hash(&self) -> &TxHash { self.hash.get_or_init(|| self.recalculate_hash()) } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 04d02be0b7dd..89cf6382ae54 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -27,7 +27,6 @@ pub mod transaction; pub use transaction::{ execute::FillTxEnv, signed::{FullSignedTx, SignedTransaction}, - tx_type::{FullTxType, TxType}, FullTransaction, Transaction, }; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 5b3691d2fdf7..0d46141da0c6 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,6 +1,6 @@ use crate::{ Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, - FullSignedTx, FullTxType, Receipt, SignedTransaction, TxType, + FullSignedTx, Receipt, SignedTransaction, }; use core::fmt; @@ -15,9 +15,7 @@ pub trait NodePrimitives: /// Block body primitive. type BlockBody: BlockBody; /// Signed version of the transaction type. - type SignedTx: SignedTransaction + 'static; - /// Transaction envelope type ID. - type TxType: TxType + 'static; + type SignedTx: SignedTransaction + 'static; /// A receipt. 
type Receipt: Receipt; } @@ -29,7 +27,6 @@ where BlockHeader: FullBlockHeader, BlockBody: FullBlockBody, SignedTx: FullSignedTx, - TxType: FullTxType, Receipt: FullReceipt, > + Send + Sync @@ -49,7 +46,6 @@ impl FullNodePrimitives for T where BlockHeader: FullBlockHeader, BlockBody: FullBlockBody, SignedTx: FullSignedTx, - TxType: FullTxType, Receipt: FullReceipt, > + Send + Sync diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index b67e51024bf6..0608ded860e6 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -2,7 +2,6 @@ pub mod execute; pub mod signed; -pub mod tx_type; use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 5e0a91b4da2b..4665467e8b39 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,6 +1,6 @@ //! API of a signed transaction. -use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; +use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use alloc::{fmt, vec::Vec}; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; @@ -31,20 +31,22 @@ pub trait SignedTransaction: + MaybeArbitrary + InMemorySize { - /// Transaction envelope type ID. - type Type: TxType; - - /// Returns the transaction type. - fn tx_type(&self) -> Self::Type { - Self::Type::try_from(self.ty()).expect("should decode tx type id") - } - /// Returns reference to transaction hash. fn tx_hash(&self) -> &TxHash; /// Returns reference to signature. 
fn signature(&self) -> &PrimitiveSignature; + /// Returns whether this transaction type can be __broadcasted__ as full transaction over the + /// network. + /// + /// Some transactions are not broadcastable as objects and only allowed to be broadcasted as + /// hashes, e.g. because they missing context (e.g. blob sidecar). + fn is_broadcastable_in_full(&self) -> bool { + // EIP-4844 transactions are not broadcastable in full, only hashes are allowed. + !self.is_eip4844() + } + /// Recover signer from signature and hash. /// /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also `reth_primitives::transaction::recover_signer`. diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs deleted file mode 100644 index c60cd9cb3af4..000000000000 --- a/crates/primitives-traits/src/transaction/tx_type.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Abstraction of transaction envelope type ID. - -use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; -use alloy_consensus::Typed2718; -use alloy_primitives::{U64, U8}; -use core::fmt; - -/// Helper trait that unifies all behaviour required by transaction type ID to support full node -/// operations. -pub trait FullTxType: TxType + MaybeCompact {} - -impl FullTxType for T where T: TxType + MaybeCompact {} - -/// Trait representing the behavior of a transaction type. -pub trait TxType: - Send - + Sync - + Unpin - + Clone - + Copy - + Default - + fmt::Debug - + fmt::Display - + PartialEq - + Eq - + PartialEq - + Into - + Into - + TryFrom - + TryFrom - + TryFrom - + alloy_rlp::Encodable - + alloy_rlp::Decodable - + Typed2718 - + InMemorySize - + MaybeArbitrary -{ - /// Returns whether this transaction type can be __broadcasted__ as full transaction over the - /// network. - /// - /// Some transactions are not broadcastable as objects and only allowed to be broadcasted as - /// hashes, e.g. 
because they missing context (e.g. blob sidecar). - fn is_broadcastable_in_full(&self) -> bool { - // EIP-4844 transactions are not broadcastable in full, only hashes are allowed. - !self.is_eip4844() - } -} - -#[cfg(feature = "op")] -impl TxType for op_alloy_consensus::OpTxType {} - -impl TxType for alloy_consensus::TxType {} diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 18fe1498b8a8..ab796d734abd 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -86,6 +86,5 @@ impl reth_primitives_traits::NodePrimitives for EthPrimitives { type BlockHeader = alloy_consensus::Header; type BlockBody = crate::BlockBody; type SignedTx = crate::TransactionSigned; - type TxType = crate::TxType; type Receipt = crate::Receipt; } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 13c71ce2094d..444090ad2950 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1030,8 +1030,6 @@ impl TransactionSigned { } impl SignedTransaction for TransactionSigned { - type Type = TxType; - fn tx_hash(&self) -> &TxHash { self.hash.get_or_init(|| self.recalculate_hash()) } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index b4790d028290..a5256583e59b 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -5,7 +5,7 @@ use super::{ error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, TxEip7702, }; -use crate::{BlobTransaction, RecoveredTx, Transaction, TransactionSigned, TxType}; +use crate::{BlobTransaction, RecoveredTx, Transaction, TransactionSigned}; use alloc::vec::Vec; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, @@ -568,8 +568,6 @@ impl alloy_consensus::Transaction for PooledTransactionsElement { } impl SignedTransaction for PooledTransactionsElement { - type Type = TxType; - fn 
tx_hash(&self) -> &TxHash { match self { Self::Legacy(tx) => tx.hash(), diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index d0a4786dcf16..83f954387692 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -97,8 +97,6 @@ impl Typed2718 for TxType { } } -impl reth_primitives_traits::TxType for TxType {} - impl InMemorySize for TxType { /// Calculates a heuristic for the in-memory size of the [`TxType`]. #[inline] @@ -259,16 +257,8 @@ mod tests { use super::*; use alloy_primitives::hex; use reth_codecs::Compact; - use reth_primitives_traits::TxType as _; use rstest::rstest; - #[test] - fn is_broadcastable() { - assert!(TxType::Legacy.is_broadcastable_in_full()); - assert!(TxType::Eip1559.is_broadcastable_in_full()); - assert!(!TxType::Eip4844.is_broadcastable_in_full()); - } - #[rstest] #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] From 394f973acdd89249ad5c88142b9756b5a4e049a3 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 11 Dec 2024 15:58:12 +0400 Subject: [PATCH 69/70] chore: improve `CompactZstd` macro (#13277) --- Cargo.lock | 1 - crates/primitives/src/receipt.rs | 6 +- crates/prune/types/Cargo.toml | 1 - .../codecs/derive/src/compact/generator.rs | 77 ++++++++++--------- .../storage/codecs/derive/src/compact/mod.rs | 18 +++-- crates/storage/codecs/derive/src/lib.rs | 47 +++++++++-- crates/storage/codecs/src/private.rs | 2 +- 7 files changed, 99 insertions(+), 53 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bdfbd5680f4f..abe128a632ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8872,7 +8872,6 @@ dependencies = [ "alloy-primitives", "arbitrary", "assert_matches", - "bytes", "derive_more", "modular-bitfield", "proptest", diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 62c664e22a46..549c032cee0d 100644 --- 
a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -13,8 +13,6 @@ use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; use crate::TxType; -#[cfg(feature = "reth-codec")] -use reth_zstd_compressors::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). pub use reth_primitives_traits::receipt::gas_spent_by_transactions; @@ -25,6 +23,10 @@ pub use reth_primitives_traits::receipt::gas_spent_by_transactions; )] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::CompactZstd))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] +#[cfg_attr(any(test, feature = "reth-codec"), reth_zstd( + compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, + decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR +))] #[rlp(trailing)] pub struct Receipt { /// Receipt type. diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 5446d6f76ff5..75093511310d 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -15,7 +15,6 @@ workspace = true reth-codecs.workspace = true alloy-primitives.workspace = true -bytes.workspace = true derive_more.workspace = true modular-bitfield.workspace = true serde.workspace = true diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 63fef05ad705..a84913f59e81 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -1,6 +1,7 @@ //! Code generator for the `Compact` trait. 
use super::*; +use crate::ZstdConfig; use convert_case::{Case, Casing}; use syn::{Attribute, LitStr}; @@ -10,20 +11,20 @@ pub fn generate_from_to( attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, - is_zstd: bool, + zstd: Option, ) -> TokenStream2 { let flags = format_ident!("{ident}Flags"); - let to_compact = generate_to_compact(fields, ident, is_zstd); - let from_compact = generate_from_compact(fields, ident, is_zstd); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + + let to_compact = generate_to_compact(fields, ident, zstd.clone(), &reth_codecs); + let from_compact = generate_from_compact(fields, ident, zstd); let snake_case_ident = ident.to_string().to_case(Case::Snake); let fuzz = format_ident!("fuzz_test_{snake_case_ident}"); let test = format_ident!("fuzz_{snake_case_ident}"); - let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); - let lifetime = if has_lifetime { quote! { 'a } } else { @@ -77,7 +78,7 @@ pub fn generate_from_to( #fuzz_tests #impl_compact { - fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]> { + fn to_compact(&self, buf: &mut B) -> usize where B: #reth_codecs::__private::bytes::BufMut + AsMut<[u8]> { let mut flags = #flags::default(); let mut total_length = 0; #(#to_compact)* @@ -92,7 +93,11 @@ pub fn generate_from_to( } /// Generates code to implement the `Compact` trait method `to_compact`. -fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> TokenStream2 { +fn generate_from_compact( + fields: &FieldList, + ident: &Ident, + zstd: Option, +) -> TokenStream2 { let mut lines = vec![]; let mut known_types = vec!["B256", "Address", "Bloom", "Vec", "TxHash", "BlockHash", "FixedBytes"]; @@ -147,38 +152,41 @@ fn generate_from_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> To // If the type has compression support, then check the `__zstd` flag. Otherwise, use the default // code branch. 
However, even if it's a type with compression support, not all values are // to be compressed (thus the zstd flag). Ideally only the bigger ones. - is_zstd - .then(|| { - let decompressor = format_ident!("{}_DECOMPRESSOR", ident.to_string().to_uppercase()); - quote! { - if flags.__zstd() != 0 { - #decompressor.with(|decompressor| { - let decompressor = &mut decompressor.borrow_mut(); - let decompressed = decompressor.decompress(buf); - let mut original_buf = buf; - - let mut buf: &[u8] = decompressed; - #(#lines)* - (obj, original_buf) - }) - } else { + if let Some(zstd) = zstd { + let decompressor = zstd.decompressor; + quote! { + if flags.__zstd() != 0 { + #decompressor.with(|decompressor| { + let decompressor = &mut decompressor.borrow_mut(); + let decompressed = decompressor.decompress(buf); + let mut original_buf = buf; + + let mut buf: &[u8] = decompressed; #(#lines)* - (obj, buf) - } - } - }) - .unwrap_or_else(|| { - quote! { + (obj, original_buf) + }) + } else { #(#lines)* (obj, buf) } - }) + } + } else { + quote! { + #(#lines)* + (obj, buf) + } + } } /// Generates code to implement the `Compact` trait method `from_compact`. -fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec { +fn generate_to_compact( + fields: &FieldList, + ident: &Ident, + zstd: Option, + reth_codecs: &syn::Path, +) -> Vec { let mut lines = vec![quote! { - let mut buffer = bytes::BytesMut::new(); + let mut buffer = #reth_codecs::__private::bytes::BytesMut::new(); }]; let is_enum = fields.iter().any(|field| matches!(field, FieldTypes::EnumVariant(_))); @@ -198,7 +206,7 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< // Just because a type supports compression, doesn't mean all its values are to be compressed. // We skip the smaller ones, and thus require a flag` __zstd` to specify if this value is // compressed or not. - if is_zstd { + if zstd.is_some() { lines.push(quote! 
{ let mut zstd = buffer.len() > 7; if zstd { @@ -214,9 +222,8 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< buf.put_slice(&flags); }); - if is_zstd { - let compressor = format_ident!("{}_COMPRESSOR", ident.to_string().to_uppercase()); - + if let Some(zstd) = zstd { + let compressor = zstd.compressor; lines.push(quote! { if zstd { #compressor.with(|compressor| { diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index 1c1723d2ec94..ab9ed78e164e 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -1,7 +1,7 @@ use proc_macro::TokenStream; use proc_macro2::{Ident, TokenStream as TokenStream2}; use quote::{format_ident, quote}; -use syn::{parse_macro_input, Data, DeriveInput, Generics}; +use syn::{Data, DeriveInput, Generics}; mod generator; use generator::*; @@ -15,6 +15,8 @@ use flags::*; mod structs; use structs::*; +use crate::ZstdConfig; + // Helper Alias type type IsCompact = bool; // Helper Alias type @@ -40,16 +42,16 @@ pub enum FieldTypes { } /// Derives the `Compact` trait and its from/to implementations. -pub fn derive(input: TokenStream, is_zstd: bool) -> TokenStream { +pub fn derive(input: DeriveInput, zstd: Option) -> TokenStream { let mut output = quote! {}; - let DeriveInput { ident, data, generics, attrs, .. } = parse_macro_input!(input); + let DeriveInput { ident, data, generics, attrs, .. 
} = input; let has_lifetime = has_lifetime(&generics); let fields = get_fields(&data); - output.extend(generate_flag_struct(&ident, &attrs, has_lifetime, &fields, is_zstd)); - output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, is_zstd)); + output.extend(generate_flag_struct(&ident, &attrs, has_lifetime, &fields, zstd.is_some())); + output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, zstd)); output.into() } @@ -236,7 +238,7 @@ mod tests { let DeriveInput { ident, data, attrs, .. } = parse2(f_struct).unwrap(); let fields = get_fields(&data); output.extend(generate_flag_struct(&ident, &attrs, false, &fields, false)); - output.extend(generate_from_to(&ident, &attrs, false, &fields, false)); + output.extend(generate_from_to(&ident, &attrs, false, &fields, None)); // Expected output in a TokenStream format. Commas matter! let should_output = quote! { @@ -298,10 +300,10 @@ mod tests { fuzz_test_test_struct(TestStruct::default()) } impl reth_codecs::Compact for TestStruct { - fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]> { + fn to_compact(&self, buf: &mut B) -> usize where B: reth_codecs::__private::bytes::BufMut + AsMut<[u8]> { let mut flags = TestStructFlags::default(); let mut total_length = 0; - let mut buffer = bytes::BytesMut::new(); + let mut buffer = reth_codecs::__private::bytes::BytesMut::new(); let f_u64_len = self.f_u64.to_compact(&mut buffer); flags.set_f_u64_len(f_u64_len as u8); let f_u256_len = self.f_u256.to_compact(&mut buffer); diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 0b4015830f5d..a835e8fab3c2 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -20,6 +20,12 @@ use syn::{ mod arbitrary; mod compact; +#[derive(Clone)] +pub(crate) struct ZstdConfig { + compressor: syn::Path, + decompressor: syn::Path, +} + /// Derives the `Compact` trait for custom structs, optimizing 
serialization with a possible /// bitflag struct. /// @@ -51,15 +57,46 @@ mod compact; /// efficient decoding. #[proc_macro_derive(Compact, attributes(maybe_zero, reth_codecs))] pub fn derive(input: TokenStream) -> TokenStream { - let is_zstd = false; - compact::derive(input, is_zstd) + compact::derive(parse_macro_input!(input as DeriveInput), None) } /// Adds `zstd` compression to derived [`Compact`]. -#[proc_macro_derive(CompactZstd, attributes(maybe_zero, reth_codecs))] +#[proc_macro_derive(CompactZstd, attributes(maybe_zero, reth_codecs, reth_zstd))] pub fn derive_zstd(input: TokenStream) -> TokenStream { - let is_zstd = true; - compact::derive(input, is_zstd) + let input = parse_macro_input!(input as DeriveInput); + + let mut compressor = None; + let mut decompressor = None; + + for attr in &input.attrs { + if attr.path().is_ident("reth_zstd") { + if let Err(err) = attr.parse_nested_meta(|meta| { + if meta.path.is_ident("compressor") { + let value = meta.value()?; + let path: syn::Path = value.parse()?; + compressor = Some(path); + } else if meta.path.is_ident("decompressor") { + let value = meta.value()?; + let path: syn::Path = value.parse()?; + decompressor = Some(path); + } else { + return Err(meta.error("unsupported attribute")) + } + Ok(()) + }) { + return err.to_compile_error().into() + } + } + } + + let (Some(compressor), Some(decompressor)) = (compressor, decompressor) else { + return quote! { + compile_error!("missing compressor or decompressor attribute"); + } + .into() + }; + + compact::derive(input, Some(ZstdConfig { compressor, decompressor })) } /// Generates tests for given type. 
diff --git a/crates/storage/codecs/src/private.rs b/crates/storage/codecs/src/private.rs index 6f54d9c9ca82..440310ffd3df 100644 --- a/crates/storage/codecs/src/private.rs +++ b/crates/storage/codecs/src/private.rs @@ -1,3 +1,3 @@ pub use modular_bitfield; -pub use bytes::Buf; +pub use bytes::{self, Buf}; From b6e682ef739354f4b280cea0ba1a1c370820fce9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 11 Dec 2024 13:25:51 +0100 Subject: [PATCH 70/70] chore: move tx errors (#13288) --- .../src/transaction/error.rs | 17 ++++++++--------- crates/primitives-traits/src/transaction/mod.rs | 2 ++ crates/primitives/src/transaction/mod.rs | 11 ++++++----- crates/primitives/src/transaction/pooled.rs | 3 +-- 4 files changed, 17 insertions(+), 16 deletions(-) rename crates/{primitives => primitives-traits}/src/transaction/error.rs (91%) diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives-traits/src/transaction/error.rs similarity index 91% rename from crates/primitives/src/transaction/error.rs rename to crates/primitives-traits/src/transaction/error.rs index 78f6cf5e5fd3..15d1d44ac594 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives-traits/src/transaction/error.rs @@ -1,8 +1,9 @@ +//! Various error variants that can happen when working with transactions. + use crate::GotExpectedBoxed; use alloy_primitives::U256; -/// Represents error variants that can happen when trying to validate a -/// [Transaction](crate::Transaction) +/// Represents error variants that can happen when trying to validate a transaction. 
#[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] pub enum InvalidTransactionError { /// The sender does not have enough funds to cover the transaction fees @@ -64,19 +65,17 @@ pub enum InvalidTransactionError { impl core::error::Error for InvalidTransactionError {} -/// Represents error variants that can happen when trying to convert a transaction to -/// [`PooledTransactionsElement`](crate::PooledTransactionsElement) +/// Represents error variants that can happen when trying to convert a transaction to pooled +/// transaction. #[derive(Debug, Clone, Eq, PartialEq, derive_more::Display, derive_more::Error)] pub enum TransactionConversionError { - /// This error variant is used when a transaction cannot be converted into a - /// [`PooledTransactionsElement`](crate::PooledTransactionsElement) because it is not supported - /// for P2P network. + /// This error variant is used when a transaction cannot be converted into a pooled transaction + /// because it is not supported for P2P network. #[display("Transaction is not supported for p2p")] UnsupportedForP2P, } -/// Represents error variants than can happen when trying to convert a -/// [`RecoveredTx`](crate::RecoveredTx) transaction. +/// Represents error variants than can happen when trying to convert a recovered transaction. #[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] pub enum TryFromRecoveredTransactionError { /// Thrown if the transaction type is unsupported. 
diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 0608ded860e6..27b5b289c2d1 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -3,6 +3,8 @@ pub mod execute; pub mod signed; +pub mod error; + use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; use core::{fmt, hash::Hash}; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 444090ad2950..4431d1f9cdea 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -32,12 +32,14 @@ use signature::decode_with_eip155_chain_id; use std::sync::{LazyLock, OnceLock}; pub use compat::FillTxEnv; -pub use error::{ - InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, -}; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -pub use reth_primitives_traits::WithEncoded; +pub use reth_primitives_traits::{ + transaction::error::{ + InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, + }, + WithEncoded, +}; pub use sidecar::BlobTransaction; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; @@ -49,7 +51,6 @@ pub mod util; pub(crate) mod access_list; mod compat; -mod error; mod meta; mod pooled; mod sidecar; diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index a5256583e59b..ffcffef4f6e6 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -2,8 +2,7 @@ //! response to `GetPooledTransactions`. 
use super::{ - error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, - TxEip7702, + recover_signer_unchecked, signature::recover_signer, TransactionConversionError, TxEip7702, }; use crate::{BlobTransaction, RecoveredTx, Transaction, TransactionSigned}; use alloc::vec::Vec;