From 1b9b0d57132bb64351a600d2540af052ec23acde Mon Sep 17 00:00:00 2001 From: cchudant Date: Mon, 23 Sep 2024 08:28:38 -0700 Subject: [PATCH] fix(block-production): fix bouncer, error reporting, debug messages (#271) Co-authored-by: antiyro Co-authored-by: Arun Jangra Co-authored-by: apoorvsadana <95699312+apoorvsadana@users.noreply.github.com> Co-authored-by: mohiiit Co-authored-by: Trantorian1 <114066155+Trantorian1@users.noreply.github.com> --- .github/workflows/coverage.yml | 2 +- .github/workflows/linters-cargo.yml | 2 +- .github/workflows/rust-check.yml | 2 +- CHANGELOG.md | 1 + Cargo.lock | 8 + Cargo.toml | 1 + cairo/Scarb.toml | 6 +- crates/client/block_import/Cargo.toml | 4 + crates/client/block_import/src/lib.rs | 43 ++- crates/client/block_import/src/metrics.rs | 96 ++++++ .../client/block_import/src/pre_validate.rs | 14 +- crates/client/block_import/src/types.rs | 3 + .../client/block_import/src/verify_apply.rs | 36 +- crates/client/db/src/db_metrics.rs | 21 +- crates/client/db/src/lib.rs | 45 +-- crates/client/db/src/tests/common/mod.rs | 4 +- crates/client/db/src/tests/test_open.rs | 6 +- crates/client/devnet/Cargo.toml | 1 + crates/client/devnet/src/lib.rs | 6 +- crates/client/eth/src/client.rs | 4 +- crates/client/eth/src/l1_messaging.rs | 6 +- crates/client/eth/src/state_update.rs | 6 +- .../exec/src/blockifier_state_adapter.rs | 44 ++- crates/client/exec/src/lib.rs | 2 +- crates/client/mempool/src/block_production.rs | 239 ++++++++----- crates/client/mempool/src/close_block.rs | 5 +- crates/client/mempool/src/inner.rs | 9 +- crates/client/mempool/src/lib.rs | 19 +- crates/client/metrics/src/lib.rs | 10 +- crates/client/rpc/src/errors.rs | 1 + crates/client/rpc/src/providers/mempool.rs | 30 +- crates/client/sync/src/fetch/fetchers.rs | 1 + crates/client/sync/src/l2.rs | 86 +---- crates/client/sync/src/lib.rs | 13 +- crates/node/src/main.rs | 17 +- crates/node/src/service/block_production.rs | 4 +- crates/node/src/service/l1.rs | 4 +- 
crates/node/src/service/rpc.rs | 4 +- crates/node/src/service/sync.rs | 20 +- crates/node/src/util.rs | 5 +- .../chain_config/presets/integration.yaml | 1 + .../chain_config/presets/mainnet.yaml | 1 + .../chain_config/presets/sepolia.yaml | 1 + .../primitives/chain_config/presets/test.yaml | 1 + .../chain_config/src/chain_config.rs | 55 ++- crates/primitives/class/Cargo.toml | 2 + crates/primitives/class/src/class_hash.rs | 320 +----------------- crates/primitives/class/src/convert.rs | 86 +++++ crates/primitives/class/src/lib.rs | 1 + crates/primitives/convert/src/lib.rs | 2 +- crates/primitives/convert/src/to_felt.rs | 28 ++ crates/primitives/transactions/Cargo.toml | 1 + .../src/broadcasted_to_blockifier.rs | 11 +- 53 files changed, 722 insertions(+), 618 deletions(-) create mode 100644 crates/client/block_import/src/metrics.rs create mode 100644 crates/primitives/class/src/convert.rs diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 7268f01aa..3cbdef512 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -25,7 +25,7 @@ jobs: - uses: software-mansion/setup-scarb@v1 with: - scarb-version: "2.7.0" + scarb-version: "2.8.2" - uses: foundry-rs/foundry-toolchain@v1 with: diff --git a/.github/workflows/linters-cargo.yml b/.github/workflows/linters-cargo.yml index 04200c24b..bbf415f85 100644 --- a/.github/workflows/linters-cargo.yml +++ b/.github/workflows/linters-cargo.yml @@ -20,7 +20,7 @@ jobs: - uses: software-mansion/setup-scarb@v1 with: - scarb-version: "2.7.0" + scarb-version: "2.8.2" - name: Setup build deps run: | diff --git a/.github/workflows/rust-check.yml b/.github/workflows/rust-check.yml index 71eccb581..959db5e6b 100644 --- a/.github/workflows/rust-check.yml +++ b/.github/workflows/rust-check.yml @@ -19,7 +19,7 @@ jobs: - uses: software-mansion/setup-scarb@v1 with: - scarb-version: "2.7.0" + scarb-version: "2.8.2" - name: Check the project run: | diff --git a/CHANGELOG.md b/CHANGELOG.md 
index 704b45433..0684e53ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## Next release +- fix(block-production): fix bouncer calculation and declared classes - fix: Fix pending block sync and add real FGW tests - test: tests added for verify and apply task in l2 sync - fix: UDC cairo 0 migration & events logic fix diff --git a/Cargo.lock b/Cargo.lock index fd8acd16b..d819f172c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5554,10 +5554,13 @@ dependencies = [ name = "mc-block-import" version = "0.7.0" dependencies = [ + "anyhow", "bitvec", "bonsai-trie", + "itertools 0.13.0", "log", "mc-db", + "mc-metrics", "mp-block", "mp-chain-config", "mp-class", @@ -5566,6 +5569,7 @@ dependencies = [ "mp-state-update", "mp-transactions", "mp-utils", + "num-traits 0.2.19", "rayon", "rstest 0.18.2", "serde", @@ -5618,6 +5622,7 @@ dependencies = [ "mc-block-import", "mc-db", "mc-mempool", + "mc-metrics", "mockall", "mp-block", "mp-chain-config", @@ -5958,6 +5963,7 @@ dependencies = [ name = "mp-class" version = "0.7.0" dependencies = [ + "anyhow", "blockifier", "cairo-lang-starknet 1.0.0-alpha.6", "cairo-lang-starknet 1.0.0-rc0", @@ -5974,6 +5980,7 @@ dependencies = [ "num-bigint", "serde", "serde_json", + "sha3", "starknet-core", "starknet-providers", "starknet-types-core", @@ -6033,6 +6040,7 @@ dependencies = [ "cairo-lang-starknet-classes", "cairo-lang-utils 2.7.0", "cairo-vm", + "log", "mp-chain-config", "mp-class", "mp-convert", diff --git a/Cargo.toml b/Cargo.toml index f3a94570c..f43046b2f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -175,6 +175,7 @@ tempfile = "3.10.1" env_logger = "0.11.3" mockall = "0.13.0" serial_test = "3.1.1" +itertools = "0.13.0" [patch.crates-io] starknet-core = { git = "https://github.com/kasarlabs/starknet-rs.git", branch = "fork" } diff --git a/cairo/Scarb.toml b/cairo/Scarb.toml index 503d9300d..edca95458 100644 --- a/cairo/Scarb.toml +++ b/cairo/Scarb.toml @@ -2,14 +2,14 @@ name = "madara_contracts" version = "0.1.0" edition = 
"2023_11" -cairo-version = "2.7.0" -scarb-version = "2.7.0" +cairo-version = "2.8.2" +scarb-version = "2.8.2" authors = ["The Madara Committee "] description = "Cairo contracts for boostrapping a madara network." [dependencies] openzeppelin = { git = "https://github.com/OpenZeppelin/cairo-contracts.git", tag = "v0.15.1" } -starknet = "2.7.0" +starknet = "2.8.2" [dev-dependencies] snforge_std = { git = "https://github.com/foundry-rs/starknet-foundry", tag = "v0.27.0" } diff --git a/crates/client/block_import/Cargo.toml b/crates/client/block_import/Cargo.toml index 8721542ab..f1ca6e95d 100644 --- a/crates/client/block_import/Cargo.toml +++ b/crates/client/block_import/Cargo.toml @@ -9,14 +9,18 @@ version.workspace = true license.workspace = true [dependencies] +anyhow.workspace = true bitvec.workspace = true +itertools.workspace = true log.workspace = true +num-traits.workspace = true rayon.workspace = true serde.workspace = true thiserror.workspace = true tokio.workspace = true mc-db = { workspace = true } +mc-metrics = { workspace = true } mp-block = { workspace = true } mp-chain-config = { workspace = true } mp-class = { workspace = true } diff --git a/crates/client/block_import/src/lib.rs b/crates/client/block_import/src/lib.rs index 33a180091..2b6a4e4a3 100644 --- a/crates/client/block_import/src/lib.rs +++ b/crates/client/block_import/src/lib.rs @@ -38,11 +38,15 @@ //! to check for errors. //! A signature verification mode should be added to allow the skipping of block validation entirely if the block is signed. 
+use anyhow::Context; use mc_db::{MadaraBackend, MadaraStorageError}; +use mc_metrics::MetricsRegistry; +use metrics::BlockMetrics; use mp_class::{class_hash::ComputeClassHashError, compile::ClassCompilationError}; use starknet_core::types::Felt; use std::{borrow::Cow, sync::Arc}; +mod metrics; mod pre_validate; mod rayon; pub mod tests; @@ -113,13 +117,39 @@ impl BlockImportError { } pub struct BlockImporter { pool: Arc, + backend: Arc, verify_apply: VerifyApply, + metrics: BlockMetrics, + always_force_flush: bool, } impl BlockImporter { - pub fn new(backend: Arc) -> Self { + /// The starting block is used for metrics. Setting it to None means it will look at the database latest block number. + pub fn new( + backend: Arc, + metrics_registry: &MetricsRegistry, + starting_block: Option, + always_force_flush: bool, + ) -> anyhow::Result { let pool = Arc::new(RayonPool::new()); - Self { verify_apply: VerifyApply::new(Arc::clone(&backend), Arc::clone(&pool)), pool } + let starting_block = if let Some(n) = starting_block { + n + } else { + backend + .get_latest_block_n() + .context("Getting latest block in database")? + .map(|b| /* next block */ b + 1) + .unwrap_or(0 /* genesis */) + }; + + Ok(Self { + verify_apply: VerifyApply::new(Arc::clone(&backend), Arc::clone(&pool)), + pool, + metrics: BlockMetrics::register(starting_block, metrics_registry) + .context("Registering metrics for block import")?, + backend, + always_force_flush, + }) } /// Perform [`BlockImporter::pre_validate`] followed by [`BlockImporter::verify_apply`] to import a block. @@ -145,7 +175,14 @@ impl BlockImporter { block: PreValidatedBlock, validation: BlockValidationContext, ) -> Result { - self.verify_apply.verify_apply(block, validation).await + let result = self.verify_apply.verify_apply(block, validation).await?; + // Flush step. 
+ let force = self.always_force_flush; + self.backend + .maybe_flush(force) + .map_err(|err| BlockImportError::Internal(format!("DB flushing error: {err:#}").into()))?; + self.metrics.update(&result.header, &self.backend); + Ok(result) } pub async fn pre_validate_pending( diff --git a/crates/client/block_import/src/metrics.rs b/crates/client/block_import/src/metrics.rs new file mode 100644 index 000000000..5e3b3db8b --- /dev/null +++ b/crates/client/block_import/src/metrics.rs @@ -0,0 +1,96 @@ +use mc_db::MadaraBackend; +use mc_metrics::{Gauge, MetricsRegistry, PrometheusError, F64}; +use mp_block::Header; +use num_traits::FromPrimitive; +use std::{ + sync::Mutex, + time::{Duration, Instant}, +}; + +#[derive(Debug)] +pub struct BlockMetrics { + /// Starting block + pub starting_block: u64, + pub starting_time: Instant, + pub last_update_instant: Mutex>, + pub last_db_metrics_update_instant: Mutex>, + + // L2 network metrics + pub l2_block_number: Gauge, + pub l2_sync_time: Gauge, + pub l2_avg_sync_time: Gauge, + pub l2_latest_sync_time: Gauge, + pub l2_state_size: Gauge, // TODO: remove this, as well as the return value from db_metrics update. 
+ pub transaction_count: Gauge, + pub event_count: Gauge, + // L1 network metrics + pub l1_gas_price_wei: Gauge, + pub l1_gas_price_strk: Gauge, +} + +impl BlockMetrics { + pub fn register(starting_block: u64, registry: &MetricsRegistry) -> Result { + Ok(Self { + starting_block, + starting_time: Instant::now(), + last_update_instant: Default::default(), + last_db_metrics_update_instant: Default::default(), + + l2_block_number: registry.register(Gauge::new("madara_l2_block_number", "Current block number")?)?, + l2_sync_time: registry.register(Gauge::new( + "madara_l2_sync_time", + "Complete sync time since startup in secs (does not account for restarts)", + )?)?, + l2_avg_sync_time: registry.register(Gauge::new( + "madara_l2_avg_sync_time", + "Average time spent between blocks since startup in secs", + )?)?, + l2_latest_sync_time: registry + .register(Gauge::new("madara_l2_latest_sync_time", "Latest time spent between blocks in secs")?)?, + l2_state_size: registry.register(Gauge::new("madara_l2_state_size", "Node storage usage in GB")?)?, + transaction_count: registry + .register(Gauge::new("madara_transaction_count", "Latest block transaction count")?)?, + event_count: registry.register(Gauge::new("madara_event_count", "Latest block event count")?)?, + l1_gas_price_wei: registry + .register(Gauge::new("madara_l1_block_gas_price", "Latest block L1 ETH gas price")?)?, + l1_gas_price_strk: registry + .register(Gauge::new("madara_l1_block_gas_price_strk", "Latest block L1 STRK gas price")?)?, + }) + } + + pub fn update(&self, block_header: &Header, backend: &MadaraBackend) { + let now = Instant::now(); + + // Update Block sync time metrics + let latest_sync_time = { + let mut last_update = self.last_update_instant.lock().expect("Poisoned lock"); + let latest_sync_time = last_update.map(|inst| now.duration_since(inst)).unwrap_or_default(); + *last_update = Some(now); + latest_sync_time.as_secs_f64() + }; + + let total_sync_time = 
now.duration_since(self.starting_time).as_secs_f64(); + self.l2_sync_time.set(total_sync_time); + self.l2_latest_sync_time.set(latest_sync_time); + self.l2_avg_sync_time.set(total_sync_time / (block_header.block_number - self.starting_block) as f64); + + self.l2_block_number.set(block_header.block_number as f64); + self.transaction_count.set(f64::from_u64(block_header.transaction_count).unwrap_or(0f64)); + self.event_count.set(f64::from_u64(block_header.event_count).unwrap_or(0f64)); + + self.l1_gas_price_wei.set(f64::from_u128(block_header.l1_gas_price.eth_l1_gas_price).unwrap_or(0f64)); + self.l1_gas_price_strk.set(f64::from_u128(block_header.l1_gas_price.strk_l1_gas_price).unwrap_or(0f64)); + + { + let mut last_db_instant = self.last_db_metrics_update_instant.lock().expect("Poisoned lock"); + let last_update_duration = last_db_instant.map(|inst| now.duration_since(inst)); + + if last_update_duration.is_none() || last_update_duration.is_some_and(|d| d >= Duration::from_secs(5)) { + *last_db_instant = Some(now); + let storage_size = backend.update_metrics(); + let size_gb = storage_size as f64 / (1024 * 1024 * 1024) as f64; + self.l2_state_size.set(size_gb); + } + } + } +} diff --git a/crates/client/block_import/src/pre_validate.rs b/crates/client/block_import/src/pre_validate.rs index 76401712b..d4d16f41d 100644 --- a/crates/client/block_import/src/pre_validate.rs +++ b/crates/client/block_import/src/pre_validate.rs @@ -59,6 +59,8 @@ pub fn pre_validate_inner( .map(|f| f()) .collect::>()?; + converted_classes.extend(block.trusted_converted_classes); + Ok(PreValidatedBlock { header: block.header, transactions: block.transactions, @@ -172,7 +174,17 @@ fn class_conversion( } DeclaredClass::Legacy(legacy) => { log::trace!("Converting legacy class with hash {:#x}", legacy.class_hash); - // TODO: verify that the class hash is correct + if !validation.trust_class_hashes { + let class_hash = legacy + .contract_class + .compute_class_hash() + .map_err(|e| 
BlockImportError::ComputeClassHash { class_hash: legacy.class_hash, error: e })?; + if class_hash != legacy.class_hash { + // TODO: For now we skip the exceptions for the legacy class hash mismatch + log::debug!("Class hash mismatch: got {:#x}, expected {:#x}", class_hash, legacy.class_hash,); + // return Err(BlockImportError::ClassHash { got: class_hash, expected: legacy.class_hash }); + } + } Ok(ConvertedClass::Legacy(LegacyConvertedClass { class_hash: legacy.class_hash, info: LegacyClassInfo { contract_class: Arc::new(legacy.contract_class) }, diff --git a/crates/client/block_import/src/types.rs b/crates/client/block_import/src/types.rs index 1c03d5aa7..e19ff2d25 100644 --- a/crates/client/block_import/src/types.rs +++ b/crates/client/block_import/src/types.rs @@ -161,6 +161,9 @@ pub struct UnverifiedFullBlock { pub transactions: Vec, pub receipts: Vec, pub declared_classes: Vec, + /// Classes that are already compiled and hashed. + #[serde(skip)] + pub trusted_converted_classes: Vec, pub commitments: UnverifiedCommitments, } diff --git a/crates/client/block_import/src/verify_apply.rs b/crates/client/block_import/src/verify_apply.rs index a104fe9da..ebfcadf84 100644 --- a/crates/client/block_import/src/verify_apply.rs +++ b/crates/client/block_import/src/verify_apply.rs @@ -1,26 +1,25 @@ -use std::{borrow::Cow, sync::Arc}; - +use crate::{ + BlockImportError, BlockImportResult, BlockValidationContext, PendingBlockImportResult, PreValidatedBlock, + PreValidatedPendingBlock, RayonPool, UnverifiedHeader, ValidatedCommitments, +}; +use itertools::Itertools; use mc_db::{MadaraBackend, MadaraStorageError}; use mp_block::{ header::PendingHeader, BlockId, BlockTag, Header, MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlockInfo, }; -use mp_convert::ToFelt; +use mp_convert::{FeltHexDisplay, ToFelt}; use starknet_api::core::ChainId; use starknet_core::types::Felt; use starknet_types_core::hash::{Poseidon, StarkHash}; 
- -use crate::{ - BlockImportError, BlockImportResult, BlockValidationContext, PendingBlockImportResult, PreValidatedBlock, - PreValidatedPendingBlock, RayonPool, UnverifiedHeader, ValidatedCommitments, -}; +use std::{borrow::Cow, sync::Arc}; mod classes; mod contracts; pub struct VerifyApply { pool: Arc, - backend: Arc, + pub(crate) backend: Arc, // Only one thread at once can verify_apply. This is the update trie step cannot be parallelized over blocks, and in addition // our database does not support concurrent write access. mutex: tokio::sync::Mutex<()>, @@ -199,11 +198,26 @@ fn update_tries( ) -> Result { if validation.trust_global_tries { let Some(global_state_root) = block.unverified_global_state_root else { - return Err(BlockImportError::Internal("Trying to import a block without a global state root but ".into())); + return Err(BlockImportError::Internal( + "Trying to import a block without a global state root when using trust_global_tries".into(), + )); }; return Ok(global_state_root); } + log::debug!( + "Deployed contracts: [{:?}]", + block.state_diff.deployed_contracts.iter().map(|c| c.address.hex_display()).format(", ") + ); + log::debug!( + "Declared classes: [{:?}]", + block.state_diff.declared_classes.iter().map(|c| c.class_hash.hex_display()).format(", ") + ); + log::debug!( + "Deprecated declared classes: [{:?}]", + block.state_diff.deprecated_declared_classes.iter().map(|c| c.hex_display()).format(", ") + ); + let (contract_trie_root, class_trie_root) = rayon::join( || { contracts::contract_trie_root( @@ -465,7 +479,7 @@ mod verify_apply_tests { StateDiff::default(), // Empty state diff true, // Trust global tries (irrelevant in this case) // Expected result: an Internal error - Err(BlockImportError::Internal("Trying to import a block without a global state root but ".into())) + Err(BlockImportError::Internal("Trying to import a block without a global state root when using trust_global_tries".into())) )] #[case::mismatch_global_state_root( 
Some(felt!("0xb")), // A non-zero global state root diff --git a/crates/client/db/src/db_metrics.rs b/crates/client/db/src/db_metrics.rs index de04082b2..2f6a42943 100644 --- a/crates/client/db/src/db_metrics.rs +++ b/crates/client/db/src/db_metrics.rs @@ -1,15 +1,34 @@ -use mc_metrics::{IntGaugeVec, MetricsRegistry, Opts, PrometheusError}; +use crate::{Column, DatabaseExt, DB}; +use mc_metrics::{Gauge, IntGaugeVec, MetricsRegistry, Opts, PrometheusError, F64}; #[derive(Clone, Debug)] pub struct DbMetrics { + pub db_size: Gauge, pub column_sizes: IntGaugeVec, } impl DbMetrics { pub fn register(registry: &MetricsRegistry) -> Result { Ok(Self { + db_size: registry.register(Gauge::new("db_size", "Node storage usage in GB")?)?, column_sizes: registry .register(IntGaugeVec::new(Opts::new("column_sizes", "Sizes of RocksDB columns"), &["column"])?)?, }) } + + /// Returns the total storage size + pub fn update(&self, db: &DB) -> u64 { + let mut storage_size = 0; + + for &column in Column::ALL.iter() { + let cf_handle = db.get_column(column); + let cf_metadata = db.get_column_family_metadata_cf(&cf_handle); + let column_size = cf_metadata.size; + storage_size += column_size; + + self.column_sizes.with_label_values(&[column.rocksdb_name()]).set(column_size as i64); + } + + storage_size + } } diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs index fe4f625a7..c76f0a7df 100644 --- a/crates/client/db/src/lib.rs +++ b/crates/client/db/src/lib.rs @@ -10,6 +10,7 @@ use bonsai_db::{BonsaiDb, DatabaseKeyMapping}; use bonsai_trie::id::BasicId; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; use db_metrics::DbMetrics; +use mc_metrics::MetricsRegistry; use mp_chain_config::ChainConfig; use mp_utils::service::Service; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; @@ -306,6 +307,7 @@ pub struct MadaraBackend { db: Arc, last_flush_time: Mutex>, chain_config: Arc, + db_metrics: DbMetrics, #[cfg(feature = "testing")] _temp_dir: Option, } @@ -333,12 
+335,18 @@ impl DatabaseService { backup_dir: Option, restore_from_latest_backup: bool, chain_config: Arc, + metrics_registry: &MetricsRegistry, ) -> anyhow::Result { log::info!("💾 Opening database at: {}", base_path.display()); - let handle = - MadaraBackend::open(base_path.to_owned(), backup_dir.clone(), restore_from_latest_backup, chain_config) - .await?; + let handle = MadaraBackend::open( + base_path.to_owned(), + backup_dir.clone(), + restore_from_latest_backup, + chain_config, + metrics_registry, + ) + .await?; Ok(Self { handle }) } @@ -346,6 +354,11 @@ impl DatabaseService { pub fn backend(&self) -> &Arc { &self.handle } + + #[cfg(any(test, feature = "testing"))] + pub fn open_for_testing(chain_config: Arc) -> Self { + Self { handle: MadaraBackend::open_for_testing(chain_config) } + } } impl Service for DatabaseService {} @@ -358,6 +371,7 @@ struct BackupRequest { impl Drop for MadaraBackend { fn drop(&mut self) { log::info!("⏳ Gracefully closing the database..."); + self.maybe_flush(true).expect("Error when flushing the database"); // flush :) } } @@ -374,6 +388,7 @@ impl MadaraBackend { db: open_rocksdb(temp_dir.as_ref(), true).unwrap(), last_flush_time: Default::default(), chain_config, + db_metrics: DbMetrics::register(&MetricsRegistry::dummy()).unwrap(), _temp_dir: Some(temp_dir), }) } @@ -384,6 +399,7 @@ impl MadaraBackend { backup_dir: Option, restore_from_latest_backup: bool, chain_config: Arc, + metrics_registry: &MetricsRegistry, ) -> Result> { let db_path = db_config_dir.join("db"); @@ -411,6 +427,7 @@ impl MadaraBackend { let db = open_rocksdb(&db_path, true)?; let backend = Arc::new(Self { + db_metrics: DbMetrics::register(metrics_registry).context("Registering db metrics")?, backup_handle, db, last_flush_time: Default::default(), @@ -424,12 +441,12 @@ impl MadaraBackend { pub fn maybe_flush(&self, force: bool) -> Result { let mut inst = self.last_flush_time.lock().expect("poisoned mutex"); - let should_flush = force + let will_flush = force || 
match *inst { Some(inst) => inst.elapsed() >= Duration::from_secs(5), None => true, }; - if should_flush { + if will_flush { log::debug!("doing a db flush"); let mut opts = FlushOptions::default(); opts.set_wait(true); @@ -441,7 +458,7 @@ impl MadaraBackend { *inst = Some(Instant::now()); } - Ok(should_flush) + Ok(will_flush) } pub async fn backup(&self) -> Result<()> { @@ -499,19 +516,9 @@ impl MadaraBackend { }) } - pub fn get_storage_size(&self, db_metrics: &DbMetrics) -> u64 { - let mut storage_size = 0; - - for &column in Column::ALL.iter() { - let cf_handle = self.db.get_column(column); - let cf_metadata = self.db.get_column_family_metadata_cf(&cf_handle); - let column_size = cf_metadata.size; - storage_size += column_size; - - db_metrics.column_sizes.with_label_values(&[column.rocksdb_name()]).set(column_size as i64); - } - - storage_size + /// Returns the total storage size + pub fn update_metrics(&self) -> u64 { + self.db_metrics.update(&self.db) } } diff --git a/crates/client/db/src/tests/common/mod.rs b/crates/client/db/src/tests/common/mod.rs index 6b9c31d29..7fbd7495a 100644 --- a/crates/client/db/src/tests/common/mod.rs +++ b/crates/client/db/src/tests/common/mod.rs @@ -17,12 +17,10 @@ use starknet_types_core::felt::Felt; pub mod temp_db { use crate::DatabaseService; use mp_chain_config::ChainConfig; - use tempfile::TempDir; pub async fn temp_db() -> DatabaseService { - let temp_dir = TempDir::new().unwrap(); let chain_config = std::sync::Arc::new(ChainConfig::test_config().expect("failed to retrieve test chain config")); - DatabaseService::new(temp_dir.path(), None, false, chain_config).await.unwrap() + DatabaseService::open_for_testing(chain_config) } } diff --git a/crates/client/db/src/tests/test_open.rs b/crates/client/db/src/tests/test_open.rs index 737a2b002..6b6b204ed 100644 --- a/crates/client/db/src/tests/test_open.rs +++ b/crates/client/db/src/tests/test_open.rs @@ -1,5 +1,6 @@ use super::common::*; use crate::DatabaseService; +use 
mc_metrics::MetricsRegistry; use mp_chain_config::ChainConfig; use mp_utils::tests_common::*; use rstest::*; @@ -18,8 +19,9 @@ async fn test_open_different_chain_id(_set_workdir: ()) { let chain_config = std::sync::Arc::new( ChainConfig::starknet_integration().expect("failed to retrieve integration chain config"), ); - let _db = DatabaseService::new(temp_dir.path(), None, false, chain_config).await.unwrap(); + let _db = + DatabaseService::new(temp_dir.path(), None, false, chain_config, &MetricsRegistry::dummy()).await.unwrap(); } let chain_config = std::sync::Arc::new(ChainConfig::test_config().expect("failed to retrieve test chain config")); - assert!(DatabaseService::new(temp_dir.path(), None, false, chain_config).await.is_err()); + assert!(DatabaseService::new(temp_dir.path(), None, false, chain_config, &MetricsRegistry::dummy()).await.is_err()); } diff --git a/crates/client/devnet/Cargo.toml b/crates/client/devnet/Cargo.toml index f7af78965..e8185e9a8 100644 --- a/crates/client/devnet/Cargo.toml +++ b/crates/client/devnet/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] rstest = { workspace = true } mc-db = { workspace = true, features = ["testing"] } mc-mempool = { workspace = true, features = ["testing"] } +mc-metrics = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread"] } proptest.workspace = true proptest-derive.workspace = true diff --git a/crates/client/devnet/src/lib.rs b/crates/client/devnet/src/lib.rs index bc0141f2e..b65a41e07 100644 --- a/crates/client/devnet/src/lib.rs +++ b/crates/client/devnet/src/lib.rs @@ -189,6 +189,7 @@ mod tests { use mc_mempool::block_production::BlockProductionTask; use mc_mempool::MempoolProvider; use mc_mempool::{transaction_hash, L1DataProvider, Mempool, MockL1DataProvider}; + use mc_metrics::MetricsRegistry; use mp_block::header::L1DataAvailabilityMode; use mp_block::{BlockId, BlockTag}; use mp_class::ClassInfo; @@ -210,7 +211,7 @@ mod tests { struct DevnetForTesting { 
backend: Arc, contracts: DevnetKeys, - block_production: BlockProductionTask, + block_production: BlockProductionTask, mempool: Arc, } @@ -300,7 +301,8 @@ mod tests { let chain_config = Arc::new(ChainConfig::test_config().unwrap()); let block = g.build(&chain_config).unwrap(); let backend = MadaraBackend::open_for_testing(Arc::clone(&chain_config)); - let importer = Arc::new(BlockImporter::new(Arc::clone(&backend))); + let importer = + Arc::new(BlockImporter::new(Arc::clone(&backend), &MetricsRegistry::dummy(), None, true).unwrap()); println!("{:?}", block.state_diff); tokio::runtime::Runtime::new() diff --git a/crates/client/eth/src/client.rs b/crates/client/eth/src/client.rs index 659684c08..885905b94 100644 --- a/crates/client/eth/src/client.rs +++ b/crates/client/eth/src/client.rs @@ -173,7 +173,7 @@ pub mod eth_client_getter_test { let contract = StarknetCoreContract::new(address, provider.clone()); let prometheus_service = MetricsService::new(true, false, 9615).unwrap(); - let l1_block_metrics = L1BlockMetrics::register(&prometheus_service.registry()).unwrap(); + let l1_block_metrics = L1BlockMetrics::register(prometheus_service.registry()).unwrap(); EthereumClient { provider: Arc::new(provider), l1_core_contract: contract.clone(), l1_block_metrics } } @@ -189,7 +189,7 @@ pub mod eth_client_getter_test { let core_contract_address = Address::parse_checksummed(INVALID_CORE_CONTRACT_ADDRESS, None).unwrap(); let prometheus_service = MetricsService::new(true, false, 9615).unwrap(); - let l1_block_metrics = L1BlockMetrics::register(&prometheus_service.registry()).unwrap(); + let l1_block_metrics = L1BlockMetrics::register(prometheus_service.registry()).unwrap(); let new_client_result = EthereumClient::new(rpc_url, core_contract_address, l1_block_metrics).await; assert!(new_client_result.is_err(), "EthereumClient::new should fail with an invalid core contract address"); diff --git a/crates/client/eth/src/l1_messaging.rs b/crates/client/eth/src/l1_messaging.rs index 
5d458e223..b251394de 100644 --- a/crates/client/eth/src/l1_messaging.rs +++ b/crates/client/eth/src/l1_messaging.rs @@ -229,7 +229,7 @@ mod l1_messaging_tests { transports::http::{Client, Http}, }; use mc_db::DatabaseService; - use mc_metrics::MetricsService; + use mc_metrics::{MetricsRegistry, MetricsService}; use mp_chain_config::ChainConfig; use mp_utils::tests_common::*; use rstest::*; @@ -344,14 +344,14 @@ mod l1_messaging_tests { // Initialize database service let db = Arc::new( - DatabaseService::new(&base_path, backup_dir, false, chain_config.clone()) + DatabaseService::new(&base_path, backup_dir, false, chain_config.clone(), &MetricsRegistry::dummy()) .await .expect("Failed to create database service"), ); // Set up metrics service let prometheus_service = MetricsService::new(true, false, 9615).unwrap(); - let l1_block_metrics = L1BlockMetrics::register(&prometheus_service.registry()).unwrap(); + let l1_block_metrics = L1BlockMetrics::register(prometheus_service.registry()).unwrap(); // Set up provider let rpc_url: Url = anvil.endpoint().parse().expect("issue while parsing"); diff --git a/crates/client/eth/src/state_update.rs b/crates/client/eth/src/state_update.rs index 0cb284bb4..3776015e2 100644 --- a/crates/client/eth/src/state_update.rs +++ b/crates/client/eth/src/state_update.rs @@ -107,7 +107,7 @@ mod eth_client_event_subscription_test { use alloy::{node_bindings::Anvil, providers::ProviderBuilder, sol}; use mc_db::DatabaseService; - use mc_metrics::MetricsService; + use mc_metrics::{MetricsRegistry, MetricsService}; use mp_chain_config::ChainConfig; use mp_convert::ToFelt; use rstest::*; @@ -164,14 +164,14 @@ mod eth_client_event_subscription_test { // Initialize database service let db = Arc::new( - DatabaseService::new(&base_path, backup_dir, false, chain_info.clone()) + DatabaseService::new(&base_path, backup_dir, false, chain_info.clone(), &MetricsRegistry::dummy()) .await .expect("Failed to create database service"), ); // Set up metrics 
service let prometheus_service = MetricsService::new(true, false, 9615).unwrap(); - let l1_block_metrics = L1BlockMetrics::register(&prometheus_service.registry()).unwrap(); + let l1_block_metrics = L1BlockMetrics::register(prometheus_service.registry()).unwrap(); let rpc_url: Url = anvil.endpoint().parse().expect("issue while parsing"); let provider = ProviderBuilder::new().on_http(rpc_url); diff --git a/crates/client/exec/src/blockifier_state_adapter.rs b/crates/client/exec/src/blockifier_state_adapter.rs index 40136513e..bb265ccbc 100644 --- a/crates/client/exec/src/blockifier_state_adapter.rs +++ b/crates/client/exec/src/blockifier_state_adapter.rs @@ -61,19 +61,21 @@ impl StateReader for BlockifierStateAdapter { .get_contract_storage_at(&on_top_of_block_id, &contract_address.to_felt(), &key.to_felt()) .map_err(|err| { log::warn!( - "Failed to retrieve storage value for contract {contract_address:#?} at key {key:#?}: {err:#}" + "Failed to retrieve storage value for contract {contract_address:#?} at key {:#x}: {err:#}", + key.to_felt() ); StateError::StateReadError(format!( - "Failed to retrieve storage value for contract {contract_address:#?} at key {key:#?}", + "Failed to retrieve storage value for contract {contract_address:#?} at key {:#x}", + key.to_felt() )) })? 
.unwrap_or(Felt::ZERO); log::debug!( - "get_storage_at: on={:?}, contract={:?} key={:?} => {:#x}", + "get_storage_at: on={:?}, contract={} key={:#x} => {:#x}", self.on_top_of_block_id, contract_address, - key, + key.to_felt(), res ); @@ -81,22 +83,22 @@ impl StateReader for BlockifierStateAdapter { } fn get_nonce_at(&self, contract_address: ContractAddress) -> StateResult { - log::debug!("get_nonce_at for {:#?}", contract_address); + log::debug!("get_nonce_at for {}", contract_address); let Some(on_top_of_block_id) = self.on_top_of_block_id else { return Ok(Nonce::default()) }; Ok(Nonce( self.backend .get_contract_nonce_at(&on_top_of_block_id, &contract_address.to_felt()) .map_err(|err| { - log::warn!("Failed to retrieve nonce for contract {contract_address:#?}: {err:#}"); - StateError::StateReadError(format!("Failed to retrieve nonce for contract {contract_address:#?}",)) + log::warn!("Failed to retrieve nonce for contract {contract_address}: {err:#}"); + StateError::StateReadError(format!("Failed to retrieve nonce for contract {contract_address}",)) })? .unwrap_or(Felt::ZERO), )) } fn get_class_hash_at(&self, contract_address: ContractAddress) -> StateResult { - log::debug!("get_class_hash_at for {:#?}", contract_address); + log::debug!("get_class_hash_at for {}", contract_address); let Some(on_top_of_block_id) = self.on_top_of_block_id else { return Ok(ClassHash::default()) }; // Note that blockifier is fine with us returning ZERO as a class_hash if it is not found, they do the check on their end after @@ -105,8 +107,8 @@ impl StateReader for BlockifierStateAdapter { .get_contract_class_hash_at(&on_top_of_block_id, &contract_address.to_felt()) .map_err(|err| { StateError::StateReadError(format!( - "Failed to retrieve class hash for contract {:#}: {:#}", - contract_address.0.key(), + "Failed to retrieve class hash for contract {:#x}: {:#}", + contract_address.to_felt(), err )) })? 
@@ -115,7 +117,7 @@ impl StateReader for BlockifierStateAdapter { } fn get_compiled_contract_class(&self, class_hash: ClassHash) -> StateResult { - log::debug!("get_compiled_contract_class for {:#?}", class_hash); + log::debug!("get_compiled_contract_class for {:#x}", class_hash.to_felt()); let Some(on_top_of_block_id) = self.on_top_of_block_id else { return Err(StateError::UndeclaredClassHash(class_hash)); @@ -136,12 +138,17 @@ impl StateReader for BlockifierStateAdapter { .backend .get_sierra_compiled(&on_top_of_block_id, &info.compiled_class_hash) .map_err(|err| { - log::warn!("Failed to retrieve sierra compiled class {class_hash:#}: {err:#}"); - StateError::StateReadError(format!("Failed to retrieve compiled class {class_hash:#}")) + log::warn!("Failed to retrieve sierra compiled class {:#x}: {err:#}", class_hash.to_felt()); + StateError::StateReadError(format!( + "Failed to retrieve compiled class {:#x}", + class_hash.to_felt() + )) })? .ok_or(StateError::StateReadError(format!( - "Inconsistent state: compiled sierra class {class_hash:#} not found" + "Inconsistent state: compiled sierra class {:#x} not found", + class_hash.to_felt() )))?; + // TODO: convert ClassCompilationError to StateError Ok(compiled_class.to_blockifier_class().map_err(|e| StateError::StateReadError(e.to_string()))?) 
} @@ -153,15 +160,18 @@ impl StateReader for BlockifierStateAdapter { } fn get_compiled_class_hash(&self, class_hash: ClassHash) -> StateResult { - log::debug!("get_compiled_class_hash for {:#?}", class_hash); + log::debug!("get_compiled_class_hash for {:#x}", class_hash.to_felt()); let Some(on_top_of_block_id) = self.on_top_of_block_id else { return Err(StateError::UndeclaredClassHash(class_hash)); }; let Some(class_info) = self.backend.get_class_info(&on_top_of_block_id, &class_hash.to_felt()).map_err(|err| { - log::warn!("Failed to retrieve compiled class hash {class_hash:#}: {err:#}"); - StateError::StateReadError(format!("Failed to retrieve compiled class hash {class_hash:#}",)) + log::warn!("Failed to retrieve compiled class hash {:#x}: {err:#}", class_hash.to_felt()); + StateError::StateReadError(format!( + "Failed to retrieve compiled class hash {:#x}", + class_hash.to_felt() + )) })? else { return Err(StateError::UndeclaredClassHash(class_hash)); diff --git a/crates/client/exec/src/lib.rs b/crates/client/exec/src/lib.rs index 46ad10819..12ef6e9ac 100644 --- a/crates/client/exec/src/lib.rs +++ b/crates/client/exec/src/lib.rs @@ -67,7 +67,7 @@ pub struct MessageFeeEstimationError { } #[derive(thiserror::Error, Debug)] -#[error("Calling contract {contract:#} on top of {block_n:#}: {err:#}")] +#[error("Calling contract {contract:#x} on top of {block_n:#}: {err:#}")] pub struct CallContractError { block_n: DbBlockId, contract: Felt, diff --git a/crates/client/mempool/src/block_production.rs b/crates/client/mempool/src/block_production.rs index 9971e4897..b1ec49dbd 100644 --- a/crates/client/mempool/src/block_production.rs +++ b/crates/client/mempool/src/block_production.rs @@ -6,7 +6,7 @@ use blockifier::state::cached_state::CommitmentStateDiff; use blockifier::state::state_api::StateReader; use blockifier::transaction::errors::TransactionExecutionError; use blockifier::transaction::transaction_execution::Transaction; -use mc_block_import::BlockImporter; +use 
mc_block_import::{BlockImportError, BlockImporter}; use mc_db::db_block_id::DbBlockId; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::{BlockifierStateAdapter, ExecutionContext}; @@ -21,15 +21,26 @@ use mp_state_update::{ use mp_transactions::TransactionWithHash; use mp_utils::graceful_shutdown; use starknet_types_core::felt::Felt; +use std::collections::VecDeque; use std::mem; use std::sync::Arc; +use std::time::Instant; use crate::close_block::close_block; use crate::header::make_pending_header; -use crate::{clone_account_tx, L1DataProvider, Mempool, MempoolProvider, MempoolTransaction}; - -/// We always take transactions in batches from the mempool -const TX_BATCH_SIZE: usize = 128; +use crate::{clone_account_tx, L1DataProvider, MempoolProvider, MempoolTransaction}; + +#[derive(Default, Clone)] +struct ContinueBlockStats { + /// Number of batches executed before reaching the bouncer capacity. + pub n_batches: usize, + /// Number of transactions included into the block + pub n_added_to_block: usize, + pub n_re_added_to_mempool: usize, + pub n_reverted: usize, + /// Rejected are txs that were unsuccessful but that were not revertible. + pub n_rejected: usize, +} #[derive(Debug, thiserror::Error)] pub enum Error { @@ -153,10 +164,13 @@ fn finalize_execution_state( /// The block production task consumes transactions from the mempool in batches. /// This is to allow optimistic concurrency. However, the block may get full during batch execution, /// and we need to re-add the transactions back into the mempool. -pub struct BlockProductionTask { +/// +/// To understand block production in madara, you should probably start with the [`mp_chain_config::ChainConfig`]
/// documentation.
+pub struct BlockProductionTask { importer: Arc, backend: Arc, - mempool: Arc, + mempool: Arc, block: MadaraPendingBlock, declared_classes: Vec, pub(crate) executor: TransactionExecutor, @@ -164,7 +178,7 @@ pub struct BlockProductionTask { current_pending_tick: usize, } -impl BlockProductionTask { +impl BlockProductionTask { #[cfg(any(test, feature = "testing"))] pub fn set_current_pending_tick(&mut self, n: usize) { self.current_pending_tick = n; @@ -207,22 +221,84 @@ impl BlockProductionTask { }) } - fn continue_block(&mut self, bouncer_cap: BouncerWeights) -> Result { + fn continue_block(&mut self, bouncer_cap: BouncerWeights) -> Result<(StateDiff, ContinueBlockStats), Error> { + let mut stats = ContinueBlockStats::default(); + self.executor.bouncer.bouncer_config.block_max_capacity = bouncer_cap; + let batch_size = self.backend.chain_config().execution_batch_size; + + let mut txs_to_process = VecDeque::with_capacity(batch_size); + let mut txs_to_process_blockifier = Vec::with_capacity(batch_size); + // This does not need to be outside the loop, but that saves an allocation + let mut executed_txs = Vec::with_capacity(batch_size); + + loop { + // Take transactions from mempool. + let to_take = batch_size.saturating_sub(txs_to_process.len()); + if to_take > 0 { + self.mempool.take_txs_chunk(/* extend */ &mut txs_to_process, batch_size); + } + + if txs_to_process.is_empty() { + // Not enough transactions in mempool to make a new batch. + break; + } - let mut txs_to_process = Vec::with_capacity(TX_BATCH_SIZE); - self.mempool.take_txs_chunk(&mut txs_to_process, TX_BATCH_SIZE); + stats.n_batches += 1; + + txs_to_process_blockifier + .extend(txs_to_process.iter().map(|tx| Transaction::AccountTransaction(clone_account_tx(&tx.tx)))); + + // Execute the transactions. + let all_results = self.executor.execute_txs(&txs_to_process_blockifier); + // When the bouncer cap is reached, blockifier will return fewer results than what we asked for. 
+ let block_now_full = all_results.len() < txs_to_process_blockifier.len(); + + for exec_result in all_results { + let mut mempool_tx = txs_to_process.pop_front().expect("Vector length mismatch"); + match exec_result { + Ok(execution_info) => { + // Reverted transactions appear here as Ok too. + log::debug!("Successful execution of transaction {:#x}", mempool_tx.tx_hash().to_felt()); + + stats.n_added_to_block += 1; + if execution_info.is_reverted() { + stats.n_reverted += 1; + } + + if let Some(class) = mem::take(&mut mempool_tx.converted_class) { + self.declared_classes.push(class); + } + + self.block.inner.receipts.push(from_blockifier_execution_info( + &execution_info, + &Transaction::AccountTransaction(clone_account_tx(&mempool_tx.tx)), + )); + let converted_tx = TransactionWithHash::from(clone_account_tx(&mempool_tx.tx)); // TODO: too many tx clones! + self.block.info.tx_hashes.push(converted_tx.hash); + self.block.inner.transactions.push(converted_tx.transaction); + } + Err(err) => { + // These are the transactions that have errored but we can't revert them. It can be because of an internal server error, but + // errors during the execution of Declare and DeployAccount also appear here as they cannot be reverted. + // We reject them. + // Note that this is a big DoS vector. + log::error!("Rejected transaction {} for unexpected error: {err:#}", mempool_tx.tx_hash()); + stats.n_rejected += 1; + } + } - let blockifier_txs: Vec<_> = - txs_to_process.iter().map(|tx| Transaction::AccountTransaction(clone_account_tx(&tx.tx))).collect(); + executed_txs.push(mempool_tx) + } - // Execute the transactions. - let all_results = self.executor.execute_txs(&blockifier_txs); + if block_now_full { + break; + } + } - // Split the `txs_to_process` vec into two iterators. - let mut to_process_iter = txs_to_process.into_iter(); - // This iterator will consume the first part of `to_process_iter`. 
- let consumed_txs_to_process = to_process_iter.by_ref().take(all_results.len()); + // Add back the unexecuted transactions to the mempool. + stats.n_re_added_to_mempool = txs_to_process.len(); + self.mempool.re_add_txs(txs_to_process); let on_top_of = self .executor @@ -231,94 +307,80 @@ impl BlockProductionTask { .expect("Block state can not be None unless we take ownership of it") .state .on_top_of_block_id; - let executed_txs: Vec<_> = consumed_txs_to_process.collect(); + let (state_diff, _visited_segments, _weights) = finalize_execution_state(&executed_txs, &mut self.executor, &self.backend, &on_top_of)?; - let n_executed_txs = executed_txs.len(); - - for (exec_result, mempool_tx) in Iterator::zip(all_results.into_iter(), executed_txs) { - log::debug!("res for {:?}", mempool_tx); - match exec_result { - Ok(execution_info) => { - // Reverted transactions appear here as Ok too. - log::debug!("Successful execution of transaction {}", mempool_tx.tx_hash()); - - if let Some(class) = mempool_tx.converted_class { - self.declared_classes.push(class); - } - - self.block.inner.receipts.push(from_blockifier_execution_info( - &execution_info, - &Transaction::AccountTransaction(clone_account_tx(&mempool_tx.tx)), - )); - let converted_tx = TransactionWithHash::from(mempool_tx.tx); - self.block.info.tx_hashes.push(converted_tx.hash); - self.block.inner.transactions.push(converted_tx.transaction); - } - Err(err) => { - // These are the transactions that have errored but we can't revert them. It can be because of an internal server error, but - // errors during the execution of Declare and DeployAccount also appear here as they cannot be reverted. - // We reject them. - // Note that this is a big DoS vector. 
- log::error!("Unsuccessful execution of transaction {}: {err:#}", mempool_tx.tx_hash()); - } - } - } - log::debug!( - "Finished tick with {} new transactions, now at {}", - n_executed_txs, - self.block.inner.transactions.len() + "Finished tick with {} new transactions, now at {} - re-adding {} txs to mempool", + stats.n_added_to_block, + self.block.inner.transactions.len(), + stats.n_re_added_to_mempool ); - // This contains the rest of `to_process_iter`. - let rest_txs_to_process: Vec<_> = to_process_iter.collect(); - - // Add back the unexecuted transactions to the mempool. - self.mempool.re_add_txs(rest_txs_to_process); - - Ok(state_diff) + Ok((state_diff, stats)) } /// Each "tick" of the block time updates the pending block but only with the appropriate fraction of the total bouncer capacity. pub fn on_pending_time_tick(&mut self) -> Result<(), Error> { let current_pending_tick = self.current_pending_tick; - let n_pending_ticks_per_block = self.backend.chain_config().n_pending_ticks_per_block(); - - log::debug!("begin pending tick {}/{}", current_pending_tick, n_pending_ticks_per_block); + let config_bouncer = self.backend.chain_config().bouncer_config.block_max_capacity; + if current_pending_tick == 0 { + return Ok(()); + } // Reduced bouncer capacity for the current pending tick - let config_bouncer = self.executor.bouncer.bouncer_config.block_max_capacity; - let frac = n_pending_ticks_per_block / current_pending_tick; // div by zero: current_pending_tick has been checked for 0 above + // reduced_gas = gas * current_pending_tick/n_pending_ticks_per_block + // - we're dealing with integers here so prefer having the division last + // - use u128 here because the multiplication would overflow + // - div by zero: see [`ChainConfig::precheck_block_production`] + let reduced_cap = + |v: usize| (v as u128 * current_pending_tick as u128 / n_pending_ticks_per_block as u128) as usize; + + let gas = reduced_cap(config_bouncer.gas); + let frac = current_pending_tick as 
f64 / n_pending_ticks_per_block as f64; + log::debug!("begin pending tick {current_pending_tick}/{n_pending_ticks_per_block}, proportion for this tick: {frac:.2}, gas limit: {gas}/{}", config_bouncer.gas); - log::debug!("frac for this tick: {:.2}", 1f64 / frac as f64); let bouncer_cap = BouncerWeights { builtin_count: BuiltinCount { - add_mod: config_bouncer.builtin_count.add_mod / frac, - bitwise: config_bouncer.builtin_count.bitwise / frac, - ecdsa: config_bouncer.builtin_count.ecdsa / frac, - ec_op: config_bouncer.builtin_count.ec_op / frac, - keccak: config_bouncer.builtin_count.keccak / frac, - mul_mod: config_bouncer.builtin_count.mul_mod / frac, - pedersen: config_bouncer.builtin_count.pedersen / frac, - poseidon: config_bouncer.builtin_count.poseidon / frac, - range_check: config_bouncer.builtin_count.range_check / frac, - range_check96: config_bouncer.builtin_count.range_check96 / frac, + add_mod: reduced_cap(config_bouncer.builtin_count.add_mod), + bitwise: reduced_cap(config_bouncer.builtin_count.bitwise), + ecdsa: reduced_cap(config_bouncer.builtin_count.ecdsa), + ec_op: reduced_cap(config_bouncer.builtin_count.ec_op), + keccak: reduced_cap(config_bouncer.builtin_count.keccak), + mul_mod: reduced_cap(config_bouncer.builtin_count.mul_mod), + pedersen: reduced_cap(config_bouncer.builtin_count.pedersen), + poseidon: reduced_cap(config_bouncer.builtin_count.poseidon), + range_check: reduced_cap(config_bouncer.builtin_count.range_check), + range_check96: reduced_cap(config_bouncer.builtin_count.range_check96), }, - gas: config_bouncer.gas / frac, - message_segment_length: config_bouncer.message_segment_length / frac, - n_events: config_bouncer.n_events / frac, - n_steps: config_bouncer.n_steps / frac, - state_diff_size: config_bouncer.state_diff_size / frac, + gas, + message_segment_length: reduced_cap(config_bouncer.message_segment_length), + n_events: reduced_cap(config_bouncer.n_events), + n_steps: reduced_cap(config_bouncer.n_steps), + state_diff_size: 
reduced_cap(config_bouncer.state_diff_size), }; - let state_diff = self.continue_block(bouncer_cap)?; + let start_time = Instant::now(); + let (state_diff, stats) = self.continue_block(bouncer_cap)?; + if stats.n_added_to_block > 0 { + log::info!( + "🧮 Executed and added {} transaction(s) to the pending block at height {} - {:?}", + stats.n_added_to_block, + self.block_n(), + start_time.elapsed(), + ); + } // Store pending block + // todo, prefer using the block import pipeline? self.backend.store_block(self.block.clone().into(), state_diff, self.declared_classes.clone())?; + // do not forget to flush :) + self.backend + .maybe_flush(true) + .map_err(|err| BlockImportError::Internal(format!("DB flushing error: {err:#}").into()))?; Ok(()) } @@ -329,7 +391,9 @@ impl BlockProductionTask { log::debug!("closing block #{}", block_n); // Complete the block with full bouncer capacity. - let new_state_diff = self.continue_block(self.executor.bouncer.bouncer_config.block_max_capacity)?; + let start_time = Instant::now(); + let (new_state_diff, _n_executed) = + self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; // Convert the pending block to a closed block and save to db. 
@@ -341,7 +405,7 @@ impl BlockProductionTask { )); let block_to_close = mem::replace(&mut self.block, new_empty_block); - let _declared_classes = mem::take(&mut self.declared_classes); + let declared_classes = mem::take(&mut self.declared_classes); let n_txs = block_to_close.inner.transactions.len(); @@ -352,6 +416,7 @@ impl BlockProductionTask { &new_state_diff, self.backend.chain_config().chain_id.clone(), block_n, + declared_classes, ) .await?; self.block.info.header.parent_block_hash = import_result.block_hash; // fix temp parent block hash for new pending :) @@ -361,7 +426,7 @@ impl BlockProductionTask { ExecutionContext::new_in_block(Arc::clone(&self.backend), &self.block.info.clone().into())?.tx_executor(); self.current_pending_tick = 0; - log::info!("⛏️ Closed block #{} with {} transactions", block_n, n_txs); + log::info!("⛏️ Closed block #{} with {} transactions - {:?}", block_n, n_txs, start_time.elapsed()); Ok(()) } @@ -376,6 +441,8 @@ impl BlockProductionTask { tokio::time::interval_at(start, self.backend.chain_config().pending_block_update_time); interval_pending_block_update.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + self.backend.chain_config().precheck_block_production()?; // check chain config for invalid config + log::info!("⛏️ Starting block production at block #{}", self.block_n()); loop { diff --git a/crates/client/mempool/src/close_block.rs b/crates/client/mempool/src/close_block.rs index 181303651..1ccbb7f03 100644 --- a/crates/client/mempool/src/close_block.rs +++ b/crates/client/mempool/src/close_block.rs @@ -2,6 +2,7 @@ use mc_block_import::{ BlockImportError, BlockImportResult, BlockImporter, BlockValidationContext, UnverifiedFullBlock, UnverifiedHeader, }; use mp_block::{header::PendingHeader, MadaraPendingBlock, MadaraPendingBlockInfo}; +use mp_class::ConvertedClass; use mp_state_update::StateDiff; use starknet_api::core::ChainId; @@ -12,6 +13,7 @@ pub async fn close_block( state_diff: &StateDiff, chain_id: 
ChainId, block_number: u64, + declared_classes: Vec, ) -> Result { let validation = BlockValidationContext::new(chain_id).trust_transaction_hashes(true); @@ -43,8 +45,9 @@ pub async fn close_block( state_diff: state_diff.clone(), transactions: inner.transactions, receipts: inner.receipts, - declared_classes: vec![], + trusted_converted_classes: declared_classes, commitments: Default::default(), // the block importer will compute the commitments for us + ..Default::default() }, validation.clone(), ) diff --git a/crates/client/mempool/src/inner.rs b/crates/client/mempool/src/inner.rs index ff0ac726a..8faf8a725 100644 --- a/crates/client/mempool/src/inner.rs +++ b/crates/client/mempool/src/inner.rs @@ -327,14 +327,11 @@ impl MempoolInner { Some(mempool_tx) } - pub fn pop_next_chunk(&mut self, dest: &mut Vec, n: usize) { - for _ in 0..n { - let Some(tx) = self.pop_next() else { break }; - dest.push(tx); - } + pub fn pop_next_chunk(&mut self, dest: &mut impl Extend, n: usize) { + dest.extend((0..n).map_while(|_| self.pop_next())) } - pub fn re_add_txs(&mut self, txs: Vec) { + pub fn re_add_txs(&mut self, txs: impl IntoIterator) { for tx in txs { let force = true; self.insert_tx(tx, force).expect("Force insert tx should not error"); diff --git a/crates/client/mempool/src/lib.rs b/crates/client/mempool/src/lib.rs index b33ddfd69..d3f264d84 100644 --- a/crates/client/mempool/src/lib.rs +++ b/crates/client/mempool/src/lib.rs @@ -29,6 +29,7 @@ use starknet_types_core::felt::Felt; use std::sync::Arc; use std::sync::RwLock; +pub use inner::TxInsersionError; pub use inner::{ArrivedAtTimestamp, MempoolTransaction}; #[cfg(any(test, feature = "testing"))] pub use l1::MockL1DataProvider; @@ -47,7 +48,7 @@ pub enum Error { #[error("Validation error: {0:#}")] Validation(#[from] StatefulValidatorError), #[error(transparent)] - InnerMempool(#[from] inner::TxInsersionError), + InnerMempool(#[from] TxInsersionError), #[error(transparent)] Exec(#[from] mc_exec::Error), 
#[error("Preprocessing transaction: {0:#}")] @@ -55,7 +56,7 @@ } impl Error { pub fn is_internal(&self) -> bool { - !matches!(self, Error::Validation(_)) + matches!(self, Error::StorageError(_) | Error::BroadcastedToBlockifier(_)) } } @@ -67,9 +68,13 @@ pub trait MempoolProvider: Send + Sync { &self, tx: BroadcastedDeployAccountTransaction, ) -> Result; - fn take_txs_chunk(&self, dest: &mut Vec, n: usize); + fn take_txs_chunk + 'static>(&self, dest: &mut I, n: usize) + where + Self: Sized; fn take_tx(&self) -> Option; - fn re_add_txs(&self, txs: Vec); + fn re_add_txs + 'static>(&self, txs: I) + where + Self: Sized; fn chain_id(&self) -> Felt; } @@ -209,7 +214,8 @@ impl MempoolProvider for Mempool { Ok(res) } - fn take_txs_chunk(&self, dest: &mut Vec, n: usize) { + /// Warning: A lock is held while a user-supplied function (extend) is run - Callers should be careful + fn take_txs_chunk + 'static>(&self, dest: &mut I, n: usize) { let mut inner = self.inner.write().expect("Poisoned lock"); inner.pop_next_chunk(dest, n) } @@ -219,7 +225,8 @@ let mut inner = self.inner.write().expect("Poisoned lock"); inner.pop_next() } - fn re_add_txs(&self, txs: Vec) { + /// Warning: A lock is taken while a user-supplied function (iterator stuff) is run - Callers should be careful + fn re_add_txs + 'static>(&self, txs: I) { let mut inner = self.inner.write().expect("Poisoned lock"); inner.re_add_txs(txs) } diff --git a/crates/client/metrics/src/lib.rs b/crates/client/metrics/src/lib.rs index 31c60e8c3..ce6409f60 100644 --- a/crates/client/metrics/src/lib.rs +++ b/crates/client/metrics/src/lib.rs @@ -19,6 +19,7 @@ pub use prometheus::{ }; #[derive(Clone, Debug)] +/// This struct can be cloned cheaply, and will still point to the same registry. pub struct MetricsRegistry(Option); // Registry is already an Arc impl MetricsRegistry { @@ -34,6 +35,11 @@ pub fn is_enabled(&self) -> bool { self.0.is_some() } + + /// Make a dummy registry that does nothing.
Useful for wiring up metrics in tests. + pub fn dummy() -> Self { + Self(None) + } } #[derive(thiserror::Error, Debug)] @@ -82,8 +88,8 @@ impl MetricsService { }) } - pub fn registry(&self) -> MetricsRegistry { - self.registry.clone() + pub fn registry(&self) -> &MetricsRegistry { + &self.registry } } diff --git a/crates/client/rpc/src/errors.rs b/crates/client/rpc/src/errors.rs index 7af071406..1e5403ba3 100644 --- a/crates/client/rpc/src/errors.rs +++ b/crates/client/rpc/src/errors.rs @@ -127,6 +127,7 @@ impl StarknetRpcApiError { pub fn data(&self) -> Option { match self { StarknetRpcApiError::ErrUnexpectedError { data } => Some(json!(data)), + StarknetRpcApiError::ValidationFailure { error } => Some(json!(error)), StarknetRpcApiError::TxnExecutionError { tx_index, error } => Some(json!({ "transaction_index": tx_index, "execution_error": error, diff --git a/crates/client/rpc/src/providers/mempool.rs b/crates/client/rpc/src/providers/mempool.rs index df69e7746..26623e310 100644 --- a/crates/client/rpc/src/providers/mempool.rs +++ b/crates/client/rpc/src/providers/mempool.rs @@ -20,12 +20,24 @@ impl MempoolAddTxProvider { } } -fn make_err(err: mc_mempool::Error) -> StarknetRpcApiError { - if err.is_internal() { - display_internal_server_error(format!("{err:#}")); - StarknetRpcApiError::InternalServerError - } else { - StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") } +impl From for StarknetRpcApiError { + fn from(value: mc_mempool::Error) -> Self { + match value { + mc_mempool::Error::InnerMempool(mc_mempool::TxInsersionError::NonceConflict) => { + StarknetRpcApiError::DuplicateTxn + } + mc_mempool::Error::Validation(err) => StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") }, + mc_mempool::Error::InnerMempool(err) => { + StarknetRpcApiError::ValidationFailure { error: format!("{err:#}") } + } + mc_mempool::Error::Exec(err) => { + StarknetRpcApiError::TxnExecutionError { tx_index: 0, error: format!("{err:#}") } + } + err => { + 
display_internal_server_error(format!("{err:#}")); + StarknetRpcApiError::InternalServerError + } + } } } @@ -35,18 +47,18 @@ impl AddTransactionProvider for MempoolAddTxProvider { &self, declare_transaction: BroadcastedDeclareTransaction, ) -> RpcResult { - Ok(self.mempool.accept_declare_tx(declare_transaction).map_err(make_err)?) + Ok(self.mempool.accept_declare_tx(declare_transaction).map_err(StarknetRpcApiError::from)?) } async fn add_deploy_account_transaction( &self, deploy_account_transaction: BroadcastedDeployAccountTransaction, ) -> RpcResult { - Ok(self.mempool.accept_deploy_account_tx(deploy_account_transaction).map_err(make_err)?) + Ok(self.mempool.accept_deploy_account_tx(deploy_account_transaction).map_err(StarknetRpcApiError::from)?) } async fn add_invoke_transaction( &self, invoke_transaction: BroadcastedInvokeTransaction, ) -> RpcResult { - Ok(self.mempool.accept_invoke_tx(invoke_transaction).map_err(make_err)?) + Ok(self.mempool.accept_invoke_tx(invoke_transaction).map_err(StarknetRpcApiError::from)?) } } diff --git a/crates/client/sync/src/fetch/fetchers.rs b/crates/client/sync/src/fetch/fetchers.rs index 1bb113ed5..92a94426d 100644 --- a/crates/client/sync/src/fetch/fetchers.rs +++ b/crates/client/sync/src/fetch/fetchers.rs @@ -346,6 +346,7 @@ fn convert_sequencer_block( .context("Converting the transactions")?, declared_classes: class_update.into_iter().map(Into::into).collect(), commitments, + ..Default::default() }) } diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs index 33fb7379a..faf83a81a 100644 --- a/crates/client/sync/src/l2.rs +++ b/crates/client/sync/src/l2.rs @@ -1,26 +1,21 @@ //! Contains the code required to sync data from the feeder efficiently. 
use crate::fetch::fetchers::fetch_pending_block_and_updates; use crate::fetch::l2_fetch_task; -use crate::metrics::block_metrics::BlockMetrics; use crate::utils::trim_hash; use anyhow::Context; use futures::{stream, StreamExt}; use mc_block_import::{ BlockImportResult, BlockImporter, BlockValidationContext, PreValidatedBlock, UnverifiedFullBlock, }; -use mc_db::db_metrics::DbMetrics; use mc_db::MadaraBackend; use mc_db::MadaraStorageError; use mc_telemetry::{TelemetryHandle, VerbosityLevel}; -use mp_block::Header; -use mp_utils::{channel_wait_or_graceful_shutdown, stopwatch_end, wait_or_graceful_shutdown, PerfStopwatch}; -use num_traits::FromPrimitive; +use mp_utils::{channel_wait_or_graceful_shutdown, wait_or_graceful_shutdown, PerfStopwatch}; use starknet_api::core::ChainId; use starknet_providers::{ProviderError, SequencerGatewayProvider}; use starknet_types_core::felt::Felt; use std::pin::pin; -use std::sync::{Arc, Mutex}; -use std::time::Instant; +use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; use tokio::task::JoinSet; use tokio::time::Duration; @@ -53,31 +48,11 @@ async fn l2_verify_and_apply_task( block_import: Arc, validation: BlockValidationContext, backup_every_n_blocks: Option, - block_metrics: BlockMetrics, - db_metrics: DbMetrics, - starting_block: u64, - sync_timer: Arc>>, telemetry: TelemetryHandle, ) -> anyhow::Result<()> { while let Some(block) = channel_wait_or_graceful_shutdown(pin!(updates_receiver.recv())).await { let BlockImportResult { header, block_hash } = block_import.verify_apply(block, validation.clone()).await?; - update_sync_metrics( - header.block_number, - &header, - starting_block, - &block_metrics, - &db_metrics, - sync_timer.clone(), - &backend, - ) - .await?; - - let sw = PerfStopwatch::new(); - if backend.maybe_flush(false)? 
{ - stopwatch_end!(sw, "flush db: {:?}"); - } - log::info!( "✨ Imported #{} ({}) and updated state root ({})", header.block_number, @@ -109,11 +84,6 @@ async fn l2_verify_and_apply_task( } } - let sw = PerfStopwatch::new(); - if backend.maybe_flush(true)? { - stopwatch_end!(sw, "flush db: {:?}"); - } - Ok(()) } @@ -212,16 +182,13 @@ pub async fn sync( backend: &Arc, provider: SequencerGatewayProvider, config: L2SyncConfig, - block_metrics: BlockMetrics, - db_metrics: DbMetrics, - starting_block: u64, chain_id: ChainId, telemetry: TelemetryHandle, + block_importer: Arc, ) -> anyhow::Result<()> { let (fetch_stream_sender, fetch_stream_receiver) = mpsc::channel(8); let (block_conv_sender, block_conv_receiver) = mpsc::channel(4); let provider = Arc::new(provider); - let sync_timer = Arc::new(Mutex::new(None)); let (once_caught_up_cb_sender, once_caught_up_cb_receiver) = oneshot::channel(); // [Fetch task] ==new blocks and updates=> [Block conversion task] ======> [Verification and apply @@ -233,7 +200,6 @@ pub async fn sync( // we are using separate tasks so that fetches don't get clogged up if by any chance the verify task // starves the tokio worker - let block_importer = Arc::new(BlockImporter::new(Arc::clone(backend))); let validation = BlockValidationContext { trust_transaction_hashes: false, trust_global_tries: config.verify, @@ -264,10 +230,6 @@ pub async fn sync( Arc::clone(&block_importer), validation.clone(), config.backup_every_n_blocks, - block_metrics, - db_metrics, - starting_block, - Arc::clone(&sync_timer), telemetry, )); join_set.spawn(l2_pending_block_task( @@ -285,45 +247,3 @@ pub async fn sync( Ok(()) } - -async fn update_sync_metrics( - block_number: u64, - block_header: &Header, - starting_block: u64, - block_metrics: &BlockMetrics, - db_metrics: &DbMetrics, - sync_timer: Arc>>, - backend: &MadaraBackend, -) -> anyhow::Result<()> { - // Update Block sync time metrics - let elapsed_time = { - let mut timer_guard = sync_timer.lock().expect("Poisoned 
lock"); - *timer_guard = Some(Instant::now()); - if let Some(start_time) = *timer_guard { - start_time.elapsed().as_secs_f64() - } else { - // For the first block, there is no previous timer set - 0.0 - } - }; - - let sync_time = block_metrics.l2_sync_time.get() + elapsed_time; - block_metrics.l2_sync_time.set(sync_time); - block_metrics.l2_latest_sync_time.set(elapsed_time); - block_metrics.l2_avg_sync_time.set(block_metrics.l2_sync_time.get() / (block_number - starting_block) as f64); - - block_metrics.l2_block_number.set(block_header.block_number as f64); - block_metrics.transaction_count.set(f64::from_u64(block_header.transaction_count).unwrap_or(0f64)); - block_metrics.event_count.set(f64::from_u64(block_header.event_count).unwrap_or(0f64)); - - block_metrics.l1_gas_price_wei.set(f64::from_u128(block_header.l1_gas_price.eth_l1_gas_price).unwrap_or(0f64)); - block_metrics.l1_gas_price_strk.set(f64::from_u128(block_header.l1_gas_price.strk_l1_gas_price).unwrap_or(0f64)); - - if block_number % 200 == 0 { - let storage_size = backend.get_storage_size(db_metrics); - let size_gb = storage_size as f64 / (1024 * 1024 * 1024) as f64; - block_metrics.l2_state_size.set(size_gb); - } - - Ok(()) -} diff --git a/crates/client/sync/src/lib.rs b/crates/client/sync/src/lib.rs index 488e9ea9d..95d132842 100644 --- a/crates/client/sync/src/lib.rs +++ b/crates/client/sync/src/lib.rs @@ -1,8 +1,8 @@ use crate::l2::L2SyncConfig; -use crate::metrics::block_metrics::BlockMetrics; use anyhow::Context; use fetch::fetchers::FetchConfig; -use mc_db::{db_metrics::DbMetrics, MadaraBackend}; +use mc_block_import::BlockImporter; +use mc_db::MadaraBackend; use mc_telemetry::TelemetryHandle; use mp_convert::ToFelt; use starknet_providers::SequencerGatewayProvider; @@ -17,16 +17,15 @@ pub mod utils; #[allow(clippy::too_many_arguments)] pub async fn sync( backend: &Arc, + block_importer: Arc, fetch_config: FetchConfig, starting_block: Option, backup_every_n_blocks: Option, - block_metrics: 
BlockMetrics, - db_metrics: DbMetrics, telemetry: TelemetryHandle, pending_block_poll_interval: Duration, ) -> anyhow::Result<()> { let (starting_block, ignore_block_order) = if let Some(starting_block) = starting_block { - log::warn!("⚠️ Forcing unordered state. This will most probably break your database."); + log::warn!("Forcing unordered state. This will most probably break your database."); (starting_block, true) } else { ( @@ -63,11 +62,9 @@ pub async fn sync( pending_block_poll_interval, ignore_block_order, }, - block_metrics, - db_metrics, - starting_block, backend.chain_config().chain_id.clone(), telemetry, + block_importer, ) .await?; diff --git a/crates/node/src/main.rs b/crates/node/src/main.rs index 349c6f3d6..5d7a59cfe 100644 --- a/crates/node/src/main.rs +++ b/crates/node/src/main.rs @@ -48,7 +48,7 @@ async fn main() -> anyhow::Result<()> { log::info!("🏷 Node Name: {}", node_name); let role = if run_cmd.is_authority() { "authority" } else { "full node" }; log::info!("👤 Role: {}", role); - log::info!("🌐 Network: {}", chain_config.chain_name); + log::info!("🌐 Network: {} (chain id `{}`)", chain_config.chain_name, chain_config.chain_id); let sys_info = SysInfo::probe(); sys_info.show(); @@ -72,11 +72,22 @@ async fn main() -> anyhow::Result<()> { run_cmd.db_params.backup_dir.clone(), run_cmd.db_params.restore_from_latest_backup, Arc::clone(&chain_config), + prometheus_service.registry(), ) .await .context("Initializing db service")?; - let importer = Arc::new(BlockImporter::new(Arc::clone(db_service.backend()))); + let importer = Arc::new( + BlockImporter::new( + Arc::clone(db_service.backend()), + prometheus_service.registry(), + run_cmd.sync_params.unsafe_starting_block, + // Always flush when in authority mode as we really want to minimize the risk of losing a block when the app is unexpectedly killed :) + /* always_force_flush */ + run_cmd.is_authority(), + ) + .context("Initializing importer service")?, + ); let l1_gas_setter = 
GasPriceProvider::new(); let l1_data_provider: Arc = Arc::new(l1_gas_setter.clone()); @@ -125,7 +136,7 @@ async fn main() -> anyhow::Result<()> { Arc::clone(&chain_config), run_cmd.network, &db_service, - prometheus_service.registry(), + importer, telemetry_service.new_handle(), ) .await diff --git a/crates/node/src/service/block_production.rs b/crates/node/src/service/block_production.rs index 799f1eba6..d2dbbb0b1 100644 --- a/crates/node/src/service/block_production.rs +++ b/crates/node/src/service/block_production.rs @@ -32,7 +32,7 @@ impl BlockProductionService { mempool: Arc, block_import: Arc, l1_data_provider: Arc, - _metrics_handle: MetricsRegistry, + _metrics_handle: &MetricsRegistry, _telemetry: TelemetryHandle, ) -> anyhow::Result { if config.block_production_disabled { @@ -66,7 +66,7 @@ impl Service for BlockProductionService { if is_devnet { // DEVNET: we the genesis block for the devnet if not deployed, otherwise we only print the devnet keys. - let keys = if (backend.get_latest_block_n().context("Getting the latest block number in db")?).is_none() { + let keys = if backend.get_latest_block_n().context("Getting the latest block number in db")?.is_none() { // deploy devnet genesis log::info!("⛏️ Deploying devnet genesis block"); diff --git a/crates/node/src/service/l1.rs b/crates/node/src/service/l1.rs index 306a0957e..b82b8f899 100644 --- a/crates/node/src/service/l1.rs +++ b/crates/node/src/service/l1.rs @@ -27,7 +27,7 @@ impl L1SyncService { pub async fn new( config: &L1SyncParams, db: &DatabaseService, - metrics_handle: MetricsRegistry, + metrics_handle: &MetricsRegistry, l1_gas_provider: GasPriceProvider, chain_id: ChainId, l1_core_address: H160, @@ -37,7 +37,7 @@ impl L1SyncService { if let Some(l1_rpc_url) = &config.l1_endpoint { let core_address = Address::from_slice(l1_core_address.as_bytes()); let l1_block_metrics = - L1BlockMetrics::register(&metrics_handle).expect("Registering prometheus metrics"); + 
L1BlockMetrics::register(metrics_handle).expect("Registering prometheus metrics"); Some( EthereumClient::new(l1_rpc_url.clone(), core_address, l1_block_metrics) .await diff --git a/crates/node/src/service/rpc.rs b/crates/node/src/service/rpc.rs index 91089149d..4653d6072 100644 --- a/crates/node/src/service/rpc.rs +++ b/crates/node/src/service/rpc.rs @@ -27,7 +27,7 @@ impl RpcService { config: &RpcParams, db: &DatabaseService, chain_config: Arc, - metrics_handle: MetricsRegistry, + metrics_handle: &MetricsRegistry, add_txs_method_provider: Arc, ) -> anyhow::Result { if config.rpc_disabled { @@ -48,7 +48,7 @@ impl RpcService { }; let (read, write, trace) = (rpcs, rpcs, rpcs); let starknet = Starknet::new(Arc::clone(db.backend()), chain_config.clone(), add_txs_method_provider); - let metrics = RpcMetrics::register(&metrics_handle)?; + let metrics = RpcMetrics::register(metrics_handle)?; Ok(Self { server_config: Some(ServerConfig { diff --git a/crates/node/src/service/sync.rs b/crates/node/src/service/sync.rs index 8437e4afd..00537e01d 100644 --- a/crates/node/src/service/sync.rs +++ b/crates/node/src/service/sync.rs @@ -1,10 +1,8 @@ use crate::cli::{NetworkType, SyncParams}; use anyhow::Context; -use mc_db::db_metrics::DbMetrics; +use mc_block_import::BlockImporter; use mc_db::{DatabaseService, MadaraBackend}; -use mc_metrics::MetricsRegistry; use mc_sync::fetch::fetchers::FetchConfig; -use mc_sync::metrics::block_metrics::BlockMetrics; use mc_telemetry::TelemetryHandle; use mp_chain_config::ChainConfig; use mp_utils::service::Service; @@ -15,11 +13,10 @@ use tokio::task::JoinSet; #[derive(Clone)] pub struct SyncService { db_backend: Arc, + block_importer: Arc, fetch_config: FetchConfig, backup_every_n_blocks: Option, starting_block: Option, - block_metrics: BlockMetrics, - db_metrics: DbMetrics, start_params: Option, disabled: bool, pending_block_poll_interval: Duration, @@ -31,11 +28,9 @@ impl SyncService { chain_config: Arc, network: NetworkType, db: 
&DatabaseService, - metrics_handle: MetricsRegistry, + block_importer: Arc, telemetry: TelemetryHandle, ) -> anyhow::Result { - let block_metrics = BlockMetrics::register(&metrics_handle)?; - let db_metrics = DbMetrics::register(&metrics_handle)?; let fetch_config = config.block_fetch_config(chain_config.chain_id.clone(), network); Ok(Self { @@ -43,8 +38,7 @@ impl SyncService { fetch_config, starting_block: config.unsafe_starting_block, backup_every_n_blocks: config.backup_every_n_blocks, - block_metrics, - db_metrics, + block_importer, start_params: Some(telemetry), disabled: config.sync_disabled, pending_block_poll_interval: Duration::from_secs(config.pending_block_poll_interval), @@ -62,9 +56,8 @@ impl Service for SyncService { fetch_config, backup_every_n_blocks, starting_block, - block_metrics, - db_metrics, pending_block_poll_interval, + block_importer, .. } = self.clone(); let telemetry = self.start_params.take().context("Service already started")?; @@ -74,11 +67,10 @@ impl Service for SyncService { join_set.spawn(async move { mc_sync::sync( &db_backend, + block_importer, fetch_config, starting_block, backup_every_n_blocks, - block_metrics, - db_metrics, telemetry, pending_block_poll_interval, ) diff --git a/crates/node/src/util.rs b/crates/node/src/util.rs index 72194ea78..58d6c6f00 100644 --- a/crates/node/src/util.rs +++ b/crates/node/src/util.rs @@ -86,16 +86,17 @@ pub fn setup_logging() -> anyhow::Result<()> { Level::Warn => { writeln!( fmt, - "{brackets}[{brackets:#}{} {style}{}{style:#}{brackets}]{brackets:#} ⚠️ {}", + "{brackets}[{brackets:#}{} {style}{}{style:#} {}{brackets}]{brackets:#} ⚠️ {}", ts, record.level(), + record.target(), record.args() ) } Level::Error if record.target() == "rpc_errors" => { writeln!( fmt, - "{brackets}[{brackets:#}{} {style}{}{style:#}{brackets}]{brackets:#} ❗ {}", + "{brackets}[{brackets:#}{} {style}{}{style:#}{brackets}]{brackets:#} ❗ RPC Internal Server Error: {}", ts, record.level(), record.args() diff --git 
a/crates/primitives/chain_config/presets/integration.yaml b/crates/primitives/chain_config/presets/integration.yaml index 463e9fc41..6110e0358 100644 --- a/crates/primitives/chain_config/presets/integration.yaml +++ b/crates/primitives/chain_config/presets/integration.yaml @@ -11,6 +11,7 @@ eth_core_contract_address: "0x4737c0c1B4D5b1A687B42610DdabEE781152359c" latest_protocol_version: "0.13.2" block_time: 360 pending_block_update_time: 2 +execution_batch_size: 16 bouncer_config: block_max_capacity: builtin_count: diff --git a/crates/primitives/chain_config/presets/mainnet.yaml b/crates/primitives/chain_config/presets/mainnet.yaml index 2d06f00b1..64afeb2e8 100644 --- a/crates/primitives/chain_config/presets/mainnet.yaml +++ b/crates/primitives/chain_config/presets/mainnet.yaml @@ -11,6 +11,7 @@ eth_core_contract_address: "0xc662c410C0ECf747543f5bA90660f6ABeBD9C8c4" latest_protocol_version: "0.13.2" block_time: 360 pending_block_update_time: 2 +execution_batch_size: 16 bouncer_config: block_max_capacity: builtin_count: diff --git a/crates/primitives/chain_config/presets/sepolia.yaml b/crates/primitives/chain_config/presets/sepolia.yaml index f5b9a3697..da2fdfe00 100644 --- a/crates/primitives/chain_config/presets/sepolia.yaml +++ b/crates/primitives/chain_config/presets/sepolia.yaml @@ -11,6 +11,7 @@ eth_core_contract_address: "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057" latest_protocol_version: "0.13.2" block_time: 360 pending_block_update_time: 2 +execution_batch_size: 16 bouncer_config: block_max_capacity: builtin_count: diff --git a/crates/primitives/chain_config/presets/test.yaml b/crates/primitives/chain_config/presets/test.yaml index b39dffc88..24dc59ec3 100644 --- a/crates/primitives/chain_config/presets/test.yaml +++ b/crates/primitives/chain_config/presets/test.yaml @@ -11,6 +11,7 @@ eth_core_contract_address: "0xE2Bb56ee936fd6433DC0F6e7e3b8365C906AA057" latest_protocol_version: "0.13.2" block_time: 360 pending_block_update_time: 2 +execution_batch_size: 
16 bouncer_config: block_max_capacity: builtin_count: diff --git a/crates/primitives/chain_config/src/chain_config.rs b/crates/primitives/chain_config/src/chain_config.rs index 457171557..757fbcb95 100644 --- a/crates/primitives/chain_config/src/chain_config.rs +++ b/crates/primitives/chain_config/src/chain_config.rs @@ -57,6 +57,8 @@ impl FromStr for ChainPreset { } } +// TODO: the motivation for these doc comments is to move them into a proper app chain developer documentation, with a +// proper page about tuning the block production performance. #[derive(Debug)] pub struct ChainVersionedConstants(pub BTreeMap); @@ -138,26 +140,50 @@ pub struct ChainConfig { pub chain_name: String, pub chain_id: ChainId, - /// For starknet, this is the STRK ERC-20 contract on starknet. + /// For Starknet, this is the STRK ERC-20 contract on L2. pub native_fee_token_address: ContractAddress, - /// For starknet, this is the ETH ERC-20 contract on starknet. + /// For Starknet, this is the ETH ERC-20 contract on L2. pub parent_fee_token_address: ContractAddress, - /// BTreeMap ensures order. + /// Ordered mapping from protocol version to execution constants, such as the gas cost of the built-ins and such. + // BTreeMap ensures order. pub versioned_constants: ChainVersionedConstants, + // TODO: document supported versions (blockifier is not fully backward compatible) #[serde(deserialize_with = "deserialize_starknet_version")] pub latest_protocol_version: StarknetVersion, /// Only used for block production. + /// Time between every block. + // TODO: document what happens when block time is missed, implement different configurable behaviors. #[serde(deserialize_with = "deserialize_duration")] pub block_time: Duration, + /// Only used for block production. - /// Block time is divided into "ticks": everytime this duration elapses, the pending block is updated. + /// + /// Block time is divided into "ticks": everytime this duration elapses, the pending block is updated. 
+ /// Each pending block tick will try to fill up the bouncer capacity proportionally to the tick index within the entire block time. + /// Note that having a low update time and bouncer capacity together, but leaving a high execution batch size will likely result in + /// block production suboptimally trying to execute a lot more transactions than what would fit in the bouncer capacity for a single tick. + /// This is especially relevant when optimistic parallelization is enabled. + /// + /// ## Why does Starknet have a pending block anyway? + /// + /// The pending block is an artifact to allow low transaction confirmation times that may go away when a peer-to-peer + /// consensus will be the norm. + /// A low block time, or a pending block with a low update time will yield the same execution performance - but the big difference is + /// that network sync is not optimized for very fast and very small blocks yet. On top of that, the prover may have special needs in terms + /// of those as well. The pending block is a stop-gap solution to these problems in the meantime. #[serde(deserialize_with = "deserialize_duration")] pub pending_block_update_time: Duration, - /// The bouncer is in charge of limiting block sizes. This is where the max number of step per block, gas etc are. /// Only used for block production. + /// Block production is handled in batches; each batch will pop this number of transactions from the mempool. This is + /// primarily useful for optimistic parallelization. + /// A value too high may have a performance impact - you will need some testing to find the best value for your network. + pub execution_batch_size: usize, + + /// Only used for block production. + /// The bouncer is in charge of limiting block sizes. This is where the max number of step per block, gas etc are. 
#[serde(deserialize_with = "deserialize_bouncer_config")] pub bouncer_config: BouncerConfig, @@ -169,7 +195,7 @@ pub struct ChainConfig { /// This number is the maximum nonce the invoke tx can have to qualify for the validation skip. pub max_nonce_for_validation_skip: u64, - /// The Starknet core contract address for the L1 watcher. + /// The Starknet core contract address on L1, used by the L1 watcher service. pub eth_core_contract_address: H160, } @@ -189,6 +215,21 @@ impl ChainConfig { serde_yaml::from_str(&config_str).context("While deserializing chain config") } + /// Verify that the chain config is valid for block production. + pub fn precheck_block_production(&self) -> anyhow::Result<()> { + // block_time != 0 implies that n_pending_ticks_per_block != 0. + if self.sequencer_address == ContractAddress::default() { + bail!("Sequencer address cannot be 0x0 for block production.") + } + if self.block_time.as_millis() == 0 { + bail!("Block time cannot be zero for block production.") + } + if self.pending_block_update_time.as_millis() == 0 { + bail!("Block time cannot be zero for block production.") + } + Ok(()) + } + /// Returns the Chain Config preset for Starknet Mainnet. pub fn starknet_mainnet() -> anyhow::Result { // Sources: @@ -216,7 +257,9 @@ impl ChainConfig { } /// This is the number of pending ticks (see [`ChainConfig::pending_block_update_time`]) in a block. + /// The chain config needs to be checked with [`ChainConfig::precheck_block_production`] beforehand. pub fn n_pending_ticks_per_block(&self) -> usize { + // Division by zero: see [`ChainConfig::precheck_block_production`]. 
(self.block_time.as_millis() / self.pending_block_update_time.as_millis()) as usize } diff --git a/crates/primitives/class/Cargo.toml b/crates/primitives/class/Cargo.toml index f9d33b05d..103324bb6 100644 --- a/crates/primitives/class/Cargo.toml +++ b/crates/primitives/class/Cargo.toml @@ -31,11 +31,13 @@ starknet-core = { workspace = true } starknet-types-core = { workspace = true } # Other +anyhow = { workspace = true } flate2 = { workspace = true } lazy_static = { workspace = true } num-bigint = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +sha3 = { workspace = true } thiserror = { workspace = true } [dev-dependencies] diff --git a/crates/primitives/class/src/class_hash.rs b/crates/primitives/class/src/class_hash.rs index 9979ad31b..674547ff1 100644 --- a/crates/primitives/class/src/class_hash.rs +++ b/crates/primitives/class/src/class_hash.rs @@ -3,19 +3,27 @@ use starknet_types_core::{ hash::{Poseidon, StarkHash}, }; -use crate::{ContractClass, FlattenedSierraClass, SierraEntryPoint}; +use crate::{ + convert::{parse_compressed_legacy_class, ParseCompressedLegacyClassError}, + CompressedLegacyContractClass, ContractClass, FlattenedSierraClass, SierraEntryPoint, +}; +use starknet_core::types::contract::ComputeClassHashError as StarknetComputeClassHashError; #[derive(Debug, thiserror::Error)] pub enum ComputeClassHashError { #[error("Unsupported Sierra version: {0}")] UnsupportedSierraVersion(String), + #[error(transparent)] + StarknetError(#[from] StarknetComputeClassHashError), + #[error(transparent)] + ParseError(#[from] ParseCompressedLegacyClassError), } impl ContractClass { pub fn compute_class_hash(&self) -> Result { match self { ContractClass::Sierra(sierra) => sierra.compute_class_hash(), - ContractClass::Legacy(_) => unimplemented!("Legacy class hash computation"), + ContractClass::Legacy(legacy) => legacy.compute_class_hash(), } } } @@ -53,6 +61,13 @@ fn compute_hash_entries_point(entry_points: 
&[SierraEntryPoint]) -> Felt { Poseidon::hash_array(&entry_pointfalten) } +impl CompressedLegacyContractClass { + pub fn compute_class_hash(&self) -> Result { + let legacy_contract_class = parse_compressed_legacy_class(self.clone().into())?; + legacy_contract_class.class_hash().map_err(ComputeClassHashError::from) + } +} + #[cfg(test)] mod tests { use starknet_core::types::BlockId; @@ -81,304 +96,3 @@ mod tests { assert_eq!(computed_class_hash, class_hash); } } - -// use anyhow::{Context, Error, Ok, Result}; -// use flate2::read::GzDecoder; -// use serde::Serialize; -// use serde_json::{Map, Value}; -// use sha3::{Digest, Keccak256}; -// use starknet_core::types::{ -// contract::legacy::{ -// LegacyContractClass, LegacyEntrypointOffset, LegacyProgram, RawLegacyEntryPoint, RawLegacyEntryPoints, -// }, -// CompressedLegacyContractClass, ContractClass, LegacyContractEntryPoint, -// }; -// use starknet_types_core::{ -// felt::Felt, -// hash::{Pedersen, StarkHash}, -// }; -// use std::io::Read; - -// pub trait ClassHash { -// fn class_hash(&self) -> anyhow::Result; -// } - -// impl ClassHash for ContractClass { -// fn class_hash(&self) -> anyhow::Result { -// match self { -// ContractClass::Sierra(sierra) => Ok(sierra.class_hash()), -// ContractClass::Legacy(legacy) => legacy.class_hash(), -// } -// } -// } - -// // Define the HashChain struct -// #[derive(Default)] -// pub struct HashChain { -// hash: Felt, -// count: usize, -// } - -// impl HashChain { -// pub fn update(&mut self, value: &Felt) { -// // Replace this with the actual Pedersen hash function implementation -// self.hash = Pedersen::hash(&self.hash, value); -// self.count += 1; -// } - -// pub fn finalize(self) -> Felt { -// // Replace this with the actual Pedersen hash function implementation -// Pedersen::hash(&self.hash, &Felt::from(self.count)) -// } -// } - -// impl ClassHash for CompressedLegacyContractClass { -// fn class_hash(&self) -> anyhow::Result { -// let mut contract_definition = 
parse_compressed_legacy_class(self.clone())?; -// contract_definition.program.debug_info = None; - -// if let Some(attributes) = &mut contract_definition.program.attributes { -// attributes.iter_mut().try_for_each(|attr| -> anyhow::Result<()> { -// if attr.accessible_scopes.is_empty() { -// attr.accessible_scopes.clear(); -// } -// if attr.flow_tracking_data.is_none() { -// attr.flow_tracking_data = None; -// } -// Ok(()) -// })?; -// } - -// fn add_extra_space_to_legacy_named_tuples(value: &mut Value) { -// match value { -// Value::Array(v) => walk_array(v), -// Value::Object(m) => walk_map(m), -// _ => {} -// } -// } - -// fn walk_array(array: &mut [Value]) { -// for v in array.iter_mut() { -// add_extra_space_to_legacy_named_tuples(v); -// } -// } - -// fn walk_map(object: &mut Map) { -// for (k, v) in object.iter_mut() { -// match v { -// Value::String(s) => { -// let new_value = add_extra_space_to_named_tuple_type_definition(k, s); -// if new_value.as_ref() != s { -// *v = Value::String(new_value.into()); -// } -// } -// _ => add_extra_space_to_legacy_named_tuples(v), -// } -// } -// } - -// fn add_extra_space_to_named_tuple_type_definition<'a>(key: &str, value: &'a str) -> std::borrow::Cow<'a, str> { -// use std::borrow::Cow::*; -// match key { -// "cairo_type" | "value" => Owned(add_extra_space_before_colon(value)), -// _ => Borrowed(value), -// } -// } - -// fn add_extra_space_before_colon(v: &str) -> String { -// v.replace(": ", " : ").replace(" :", " :") -// } - -// if contract_definition.program.compiler_version.is_none() { -// let mut identifiers_value = serde_json::to_value(&mut contract_definition.program.identifiers)?; -// add_extra_space_to_legacy_named_tuples(&mut identifiers_value); -// contract_definition.program.identifiers = serde_json::from_value(identifiers_value)?; - -// let mut reference_manager_value = serde_json::to_value(&mut contract_definition.program.reference_manager)?; -// add_extra_space_to_legacy_named_tuples(&mut 
reference_manager_value); -// contract_definition.program.reference_manager = serde_json::from_value(reference_manager_value)?; -// } - -// let truncated_keccak = { -// use std::io::Write; - -// let mut string_buffer = vec![]; -// let mut ser = serde_json::Serializer::new(&mut string_buffer); -// contract_definition.serialize(&mut ser).context("Serializing contract_definition for Keccak256")?; - -// let raw_json_output = String::from_utf8(string_buffer)?; - -// let mut keccak = Keccak256::new(); -// keccak.write_all(raw_json_output.as_bytes()).expect("writing to Keccak256 never fails"); - -// Felt::from_bytes_be_slice(&keccak.finalize()) -// }; - -// const API_VERSION: Felt = Felt::ZERO; - -// let mut outer = HashChain::default(); -// outer.update(&API_VERSION); - -// ["constructor", "external", "l1_handler"] -// .iter() -// .map(|key| { -// let empty_vec = Vec::new(); -// let entry_points = match *key { -// "constructor" => &contract_definition.entry_points_by_type.constructor, -// "external" => &contract_definition.entry_points_by_type.external, -// "l1_handler" => &contract_definition.entry_points_by_type.l1_handler, -// _ => &empty_vec, -// }; - -// entry_points -// .iter() -// .flat_map(|x| { -// [ -// x.selector, -// match x.offset { -// LegacyEntrypointOffset::U64AsHex(v) => Felt::from(v), -// LegacyEntrypointOffset::U64AsInt(v) => Felt::from(v), -// }, -// ] -// .into_iter() -// }) -// .fold(HashChain::default(), |mut hc, next| { -// hc.update(&next); -// hc -// }) -// }) -// .for_each(|x| outer.update(&x.finalize())); - -// fn update_hash_chain(mut hc: HashChain, next: &Felt) -> Result { -// hc.update(next); -// Ok(hc) -// } - -// let builtins = contract_definition -// .program -// .builtins -// .iter() -// .map(|s| Felt::from_bytes_be_slice(s.as_bytes())) -// .try_fold(HashChain::default(), |acc, item| update_hash_chain(acc, &item)) -// .context("Failed to process contract_definition.program.builtins")?; - -// outer.update(&builtins.finalize()); -// 
outer.update(&truncated_keccak); - -// let bytecodes = contract_definition -// .program -// .data -// .iter() -// .try_fold(HashChain::default(), update_hash_chain) -// .context("Failed to process contract_definition.program.data")?; - -// outer.update(&bytecodes.finalize()); - -// Ok(outer.finalize()) -// } -// } - -// pub fn parse_compressed_legacy_class(class: CompressedLegacyContractClass) -> Result { -// let mut gzip_decoder = GzDecoder::new(class.program.as_slice()); -// let mut program_json = String::new(); -// gzip_decoder.read_to_string(&mut program_json).context("Failed to read gzip compressed class program to string")?; - -// let program = serde_json::from_str::(&program_json).context("Failed to parse program JSON")?; - -// let is_pre_0_11_0 = match &program.compiler_version { -// Some(compiler_version) => { -// let minor_version = compiler_version -// .split('.') -// .nth(1) -// .ok_or_else(|| anyhow::anyhow!("Unexpected legacy compiler version string"))?; - -// let minor_version: u8 = minor_version.parse().context("Failed to parse minor version")?; -// minor_version < 11 -// } -// None => true, -// }; - -// let abi = match class.abi { -// Some(abi) => abi.into_iter().map(|item| item.into()).collect(), -// None => vec![], -// }; - -// Ok(LegacyContractClass { -// abi: Some(abi), -// entry_points_by_type: RawLegacyEntryPoints { -// constructor: class -// .entry_points_by_type -// .constructor -// .into_iter() -// .map(|item| parse_legacy_entrypoint(&item, is_pre_0_11_0)) -// .collect(), -// external: class -// .entry_points_by_type -// .external -// .into_iter() -// .map(|item| parse_legacy_entrypoint(&item, is_pre_0_11_0)) -// .collect(), -// l1_handler: class -// .entry_points_by_type -// .l1_handler -// .into_iter() -// .map(|item| parse_legacy_entrypoint(&item, is_pre_0_11_0)) -// .collect(), -// }, -// program, -// }) -// } - -// fn parse_legacy_entrypoint(entrypoint: &LegacyContractEntryPoint, pre_0_11_0: bool) -> RawLegacyEntryPoint { -// 
RawLegacyEntryPoint { -// // This doesn't really matter as it doesn't affect class hashes. We simply try to guess as -// // close as possible. -// offset: if pre_0_11_0 { -// LegacyEntrypointOffset::U64AsHex(entrypoint.offset) -// } else { -// LegacyEntrypointOffset::U64AsInt(entrypoint.offset) -// }, -// selector: entrypoint.selector, -// } -// } - -// #[cfg(test)] -// mod tests { -// use super::*; -// use starknet_core::types::BlockId; -// use starknet_core::types::BlockTag; -// use starknet_core::types::ContractClass; -// use starknet_providers::{Provider, SequencerGatewayProvider}; -// use starknet_types_core::felt::Felt; - -// #[tokio::test] -// async fn test_sierra_compute_class_hash() { -// let provider = SequencerGatewayProvider::starknet_alpha_mainnet(); - -// let class_hash = Felt::from_hex_unchecked("0x06c3fdaa2255c83d7fa4a01e21c46bdb55d25c616af8462ea1b3461538b163b5"); - -// let class = provider.get_class(BlockId::Tag(BlockTag::Latest), class_hash).await.unwrap(); - -// if let ContractClass::Sierra(sierra) = class { -// assert_eq!(sierra.class_hash(), class_hash); -// } else { -// panic!("Not a Sierra contract"); -// } -// } - -// #[tokio::test] -// #[ignore] -// async fn test_legacy_compute_class_hash() { -// let provider = SequencerGatewayProvider::starknet_alpha_mainnet(); - -// let class_hash = Felt::from_hex_unchecked("0x010455c752b86932ce552f2b0fe81a880746649b9aee7e0d842bf3f52378f9f8"); - -// let class = provider.get_class(BlockId::Tag(BlockTag::Latest), class_hash).await.unwrap(); - -// if let ContractClass::Legacy(legacy) = class { -// assert_eq!(legacy.class_hash().unwrap(), class_hash); -// } else { -// panic!("Not a Lecacy contract"); -// } -// } -// } diff --git a/crates/primitives/class/src/convert.rs b/crates/primitives/class/src/convert.rs new file mode 100644 index 000000000..474bc11eb --- /dev/null +++ b/crates/primitives/class/src/convert.rs @@ -0,0 +1,86 @@ +use flate2::bufread::GzDecoder; +use 
starknet_core::types::LegacyContractEntryPoint; +use starknet_core::types::{ + contract::legacy::{ + LegacyContractClass, LegacyEntrypointOffset, LegacyProgram, RawLegacyEntryPoint, RawLegacyEntryPoints, + }, + CompressedLegacyContractClass, +}; +use std::io::Read; + +#[derive(Debug, thiserror::Error)] +pub enum ParseCompressedLegacyClassError { + #[error("I/O error: {0}")] + IoError(#[from] std::io::Error), + #[error("JSON parse error: {0}")] + JsonError(#[from] serde_json::Error), + #[error("Unexpected legacy compiler version string")] + InvalidCompilerVersion, + #[error("Integer parse error: {0}")] + ParseIntError(#[from] std::num::ParseIntError), +} + +/// Attempts to recover a compressed legacy program. +pub fn parse_compressed_legacy_class( + class: CompressedLegacyContractClass, +) -> Result { + let mut gzip_decoder = GzDecoder::new(class.program.as_slice()); + let mut program_json = String::new(); + gzip_decoder.read_to_string(&mut program_json)?; + + let program = serde_json::from_str::(&program_json)?; + + let is_pre_0_11_0 = match &program.compiler_version { + Some(compiler_version) => { + let minor_version = + compiler_version.split('.').nth(1).ok_or(ParseCompressedLegacyClassError::InvalidCompilerVersion)?; + + let minor_version: u8 = minor_version.parse()?; + minor_version < 11 + } + None => true, + }; + + let abi = match class.abi { + Some(abi) => abi.into_iter().map(|item| item.into()).collect(), + None => vec![], + }; + + Ok(LegacyContractClass { + abi: Some(abi), + entry_points_by_type: RawLegacyEntryPoints { + constructor: class + .entry_points_by_type + .constructor + .into_iter() + .map(|item| parse_legacy_entrypoint(&item, is_pre_0_11_0)) + .collect(), + external: class + .entry_points_by_type + .external + .into_iter() + .map(|item| parse_legacy_entrypoint(&item, is_pre_0_11_0)) + .collect(), + l1_handler: class + .entry_points_by_type + .l1_handler + .into_iter() + .map(|item| parse_legacy_entrypoint(&item, is_pre_0_11_0)) + .collect(), + }, 
+ program, + }) +} + +fn parse_legacy_entrypoint(entrypoint: &LegacyContractEntryPoint, pre_0_11_0: bool) -> RawLegacyEntryPoint { + RawLegacyEntryPoint { + // This doesn't really matter as it doesn't affect class hashes. We simply try to guess as + // close as possible. + offset: if pre_0_11_0 { + LegacyEntrypointOffset::U64AsHex(entrypoint.offset) + } else { + LegacyEntrypointOffset::U64AsInt(entrypoint.offset) + }, + selector: entrypoint.selector, + } +} diff --git a/crates/primitives/class/src/lib.rs b/crates/primitives/class/src/lib.rs index 6d7acd15c..a8dd3fac2 100644 --- a/crates/primitives/class/src/lib.rs +++ b/crates/primitives/class/src/lib.rs @@ -5,6 +5,7 @@ use starknet_types_core::felt::Felt; pub mod class_hash; pub mod class_update; pub mod compile; +pub mod convert; mod into_starknet_core; #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/crates/primitives/convert/src/lib.rs b/crates/primitives/convert/src/lib.rs index d2c24b701..1ea5ecf6f 100644 --- a/crates/primitives/convert/src/lib.rs +++ b/crates/primitives/convert/src/lib.rs @@ -2,7 +2,7 @@ mod felt; mod to_felt; pub use felt::{felt_to_u128, felt_to_u64}; -pub use to_felt::ToFelt; +pub use to_felt::{DisplayFeltAsHex, FeltHexDisplay, ToFelt}; pub mod test { /// Asserts that the conversion between two types is consistent. diff --git a/crates/primitives/convert/src/to_felt.rs b/crates/primitives/convert/src/to_felt.rs index 73b217758..7edf31717 100644 --- a/crates/primitives/convert/src/to_felt.rs +++ b/crates/primitives/convert/src/to_felt.rs @@ -1,6 +1,7 @@ use primitive_types::H160; use starknet_types_core::felt::Felt; +use core::fmt; use std::ops::Deref; use starknet_api::block::BlockHash; @@ -93,6 +94,33 @@ impl_for_wrapper!(EntryPointSelector); impl_for_wrapper!(CompiledClassHash); impl_for_wrapper!(ContractAddressSalt); +pub trait FeltHexDisplay { + /// Force-display this felt as hexadecimal when using the [`fmt::Display`] or [`fmt::Debug`] traits. 
+ fn hex_display(self) -> DisplayFeltAsHex; +} +impl FeltHexDisplay for T { + fn hex_display(self) -> DisplayFeltAsHex { + DisplayFeltAsHex(self.to_felt()) + } +} +impl FeltHexDisplay for Felt { + fn hex_display(self) -> DisplayFeltAsHex { + DisplayFeltAsHex(self) + } +} + +pub struct DisplayFeltAsHex(pub Felt); +impl fmt::Display for DisplayFeltAsHex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:#x}", self.0) + } +} +impl fmt::Debug for DisplayFeltAsHex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self, f) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/transactions/Cargo.toml b/crates/primitives/transactions/Cargo.toml index 2951178e4..5f2d0ea78 100644 --- a/crates/primitives/transactions/Cargo.toml +++ b/crates/primitives/transactions/Cargo.toml @@ -30,6 +30,7 @@ starknet_api = { workspace = true } # Other anyhow = { workspace = true } +log = { workspace = true } num-bigint = { workspace = true } serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } diff --git a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs index 59e44de9c..96cb5e30f 100644 --- a/crates/primitives/transactions/src/broadcasted_to_blockifier.rs +++ b/crates/primitives/transactions/src/broadcasted_to_blockifier.rs @@ -38,17 +38,12 @@ pub fn broadcasted_to_blockifier( (blockifier::transaction::transaction_execution::Transaction, Option), BroadcastedToBlockifierError, > { - // TODO: when class_hash computation is fixed on legacy contract classes, remove this check - if let starknet_core::types::BroadcastedTransaction::Declare( - starknet_core::types::BroadcastedDeclareTransaction::V1(_), - ) = &transaction - { - return Err(BroadcastedToBlockifierError::LegacyContractClassesNotSupported); - } let (class_info, class_hash, extra_class_info) = match &transaction { 
starknet_core::types::BroadcastedTransaction::Declare(tx) => match tx { starknet_core::types::BroadcastedDeclareTransaction::V1(tx) => { - let class_hash = Felt::ZERO; + let compressed_legacy_class: CompressedLegacyContractClass = (*tx.contract_class).clone().into(); + let class_hash = compressed_legacy_class.compute_class_hash().unwrap(); + log::debug!("Computed legacy class hash: {:?}", class_hash); let compressed_legacy_class: CompressedLegacyContractClass = (*tx.contract_class).clone().into(); let class_blockifier = compressed_legacy_class .to_blockifier_class()