From 9889fc5c297008afe34fc71782ab38e7a0ad101c Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 9 Dec 2024 18:06:58 +0100 Subject: [PATCH 1/5] Speed up demo-native --- .env | 5 +-- docker-compose.yaml | 12 +++++-- process-compose.yaml | 25 ++++++++++---- sequencer-sqlite/Cargo.lock | 2 +- sequencer/src/bin/deploy.rs | 13 +++++++- sequencer/src/bin/espresso-bridge.rs | 45 ++++++++++++++++++++++---- sequencer/src/bin/espresso-dev-node.rs | 17 +++++++++- types/src/v0/impls/header.rs | 2 +- utils/src/deployer.rs | 6 ++-- 9 files changed, 103 insertions(+), 24 deletions(-) diff --git a/.env b/.env index 128a6994d..8d867a82b 100644 --- a/.env +++ b/.env @@ -35,6 +35,7 @@ ESPRESSO_SEQUENCER_MAX_CONNECTIONS=25 ESPRESSO_SEQUENCER_STORAGE_PATH=/store/sequencer ESPRESSO_SEQUENCER_GENESIS_FILE=/genesis/demo.toml ESPRESSO_SEQUENCER_L1_PORT=8545 +ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL=100ms ESPRESSO_SEQUENCER_L1_WS_PORT=8546 ESPRESSO_SEQUENCER_L1_PROVIDER=http://demo-l1-network:${ESPRESSO_SEQUENCER_L1_PORT} ESPRESSO_SEQUENCER_L1_WS_PROVIDER=ws://demo-l1-network:${ESPRESSO_SEQUENCER_L1_WS_PORT} @@ -149,5 +150,5 @@ INTEGRATION_TEST_PROTO=http # `03`, marketplace upgrade will be tested. INTEGRATION_TEST_SEQUENCER_VERSION=02 -# max database connections -ESPRESSO_SEQUENCER_DATABASE_MAX_CONNECTIONS=25 \ No newline at end of file +# max database connections +ESPRESSO_SEQUENCER_DATABASE_MAX_CONNECTIONS=25 diff --git a/docker-compose.yaml b/docker-compose.yaml index 1e0fe5e8b..c369bff17 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -21,6 +21,7 @@ services: environment: - ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_DEPLOYER_ACCOUNT_INDEX - RUST_LOG - RUST_LOG_FORMAT @@ -35,6 +36,7 @@ services: environment: - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_URL - ESPRESSO_SEQUENCER_STAKE_TABLE_CAPACITY - ESPRESSO_SEQUENCER_PERMISSIONED_PROVER @@ -57,6 +59,8 @@ services: command: espresso-bridge deposit environment: - L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_PROVIDER + - L1_POLLING_INTERVAL=$ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL + - REQUIRE_L1_FINALITY=false - ESPRESSO_PROVIDER=http://sequencer1:$ESPRESSO_SEQUENCER_API_PORT - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC @@ -236,6 +240,7 @@ services: - ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=password - ESPRESSO_SEQUENCER_POSTGRES_DATABASE=sequencer - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_0 @@ -293,6 +298,7 @@ services: - ESPRESSO_SEQUENCER_POSTGRES_PASSWORD=password - ESPRESSO_SEQUENCER_POSTGRES_DATABASE=sequencer - ESPRESSO_SEQUENCER_L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_WS_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_1 @@ -345,6 +351,7 @@ services: - ESPRESSO_SEQUENCER_MAX_CONNECTIONS - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer3:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - 
ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_2 @@ -395,6 +402,7 @@ services: - ESPRESSO_SEQUENCER_MAX_CONNECTIONS - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer4:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_WS_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_3 @@ -446,6 +454,7 @@ services: - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer0:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_API_PEERS=http://sequencer0:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - ESPRESSO_SEQUENCER_L1_EVENTS_MAX_BLOCK_RANGE - ESPRESSO_STATE_RELAY_SERVER_URL - ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY=$ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_4 @@ -733,8 +742,7 @@ services: dev-rollup: image: ghcr.io/espressosystems/espresso-sequencer/dev-rollup:main - command: - dev-rollup register --ns 1; dev-rollup register --ns 2; dev-rollup register --ns 3 + command: dev-rollup register --ns 1; dev-rollup register --ns 2; dev-rollup register --ns 3 environment: - ESPRESSO_MARKETPLACE_SOLVER_API_URL=http://marketplace-solver:$ESPRESSO_MARKETPLACE_SOLVER_API_PORT depends_on: diff --git a/process-compose.yaml b/process-compose.yaml index ba66f7417..a1950c504 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -18,11 +18,9 @@ processes: readiness_probe: exec: command: "[ $(docker inspect -f '{{.State.Health.Status}}' espresso-sequencer-demo-l1-network-1) = 'healthy' ]" - initial_delay_seconds: 5 - period_seconds: 6 + period_seconds: 1 timeout_seconds: 5 - success_threshold: 1 - failure_threshold: 20 + failure_threshold: 30 deploy-sequencer-contracts: # The contract addresses are implicitly inherited from .env. 
We need to unset these or else the @@ -56,6 +54,8 @@ processes: namespace: setup environment: - L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT + - L1_POLLING_INTERVAL=$ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL + - REQUIRE_L1_FINALITY=false - ESPRESSO_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC @@ -89,6 +89,8 @@ processes: state-relay-server: command: state-relay-server readiness_probe: + failure_threshold: 10 + period_seconds: 1 http_get: scheme: http host: localhost @@ -163,6 +165,7 @@ processes: host: localhost port: $ESPRESSO_SEQUENCER_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 sequencer1: @@ -215,6 +218,7 @@ processes: host: localhost port: $ESPRESSO_SEQUENCER1_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 availability: restart: exit_on_failure @@ -262,6 +266,7 @@ processes: port: $ESPRESSO_SEQUENCER2_API_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true restart: exit_on_failure @@ -310,6 +315,7 @@ processes: port: $ESPRESSO_SEQUENCER3_API_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true restart: exit_on_failure @@ -356,6 +362,7 @@ processes: port: $ESPRESSO_SEQUENCER4_API_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true restart: exit_on_failure @@ -387,9 +394,9 @@ processes: host: localhost port: $ESPRESSO_SEQUENCER1_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 - # We use KeyDB (a Redis variant) to maintain consistency between # different parts of the CDN # Cheating a bit here too, but KeyDB is not available as a Nix package. 
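The polling-interval strings this patch introduces (`100ms` in `.env` above, the `7s` defaults in the Rust binaries further down) are human-readable durations handed to clap via `value_parser = parse_duration`. A minimal stand-in for such a parser, assuming the `humantime` crate rather than the repo's own `espresso_types::parse_duration`, might look like:

```rust
use std::time::Duration;

// Hypothetical helper: turn strings like "100ms" or "7s" into a Duration.
// The sequencer ships its own `espresso_types::parse_duration`; this sketch
// simply delegates to the `humantime` crate.
fn parse_duration(s: &str) -> Result<Duration, humantime::DurationError> {
    humantime::parse_duration(s)
}

fn main() {
    assert_eq!(parse_duration("100ms").unwrap(), Duration::from_millis(100));
    assert_eq!(parse_duration("7s").unwrap(), Duration::from_secs(7));
}
```

A function with this signature can be handed directly to clap's `value_parser`, which is how the polling-interval options in the Rust diffs below consume the same strings.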
@@ -416,6 +423,7 @@ processes: host: 127.0.0.1 port: 9093 path: /metrics + period_seconds: 1 failure_threshold: 100 # A broker is the main message-routing unit of the CDN @@ -430,6 +438,7 @@ processes: host: 127.0.0.1 port: 9091 path: /metrics + period_seconds: 1 failure_threshold: 100 # A broker is the main message-routing unit of the CDN @@ -448,6 +457,7 @@ processes: host: 127.0.0.1 port: 9092 path: /metrics + period_seconds: 1 failure_threshold: 100 cdn-whitelist: @@ -477,6 +487,7 @@ processes: port: $ESPRESSO_SUBMIT_TRANSACTIONS_PUBLIC_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true @@ -497,6 +508,7 @@ processes: port: $ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: exit_on_skipped: true @@ -520,6 +532,7 @@ processes: port: $ESPRESSO_BUILDER_SERVER_PORT path: /healthcheck failure_threshold: 100 + period_seconds: 1 availability: restart: "exit_on_failure" @@ -560,6 +573,7 @@ processes: host: localhost port: $ESPRESSO_MARKETPLACE_SOLVER_API_PORT path: /healthcheck + period_seconds: 1 failure_threshold: 100 sequencer-db-0: @@ -623,4 +637,3 @@ processes: depends_on: sequencer1: condition: process_healthy - diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index ad03aad19..1b0040642 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -4083,7 +4083,7 @@ dependencies = [ [[package]] name = "hotshot-query-service" version = "0.1.75" -source = "git+https://github.com/EspressoSystems/hotshot-query-service?branch=hotshot/0.5.82#5e2c984d19da3826f4cc8d80c5cf1a84dcd377f7" +source = "git+https://github.com/EspressoSystems/hotshot-query-service?tag=v0.1.75#dffefa160f441a663723a67bc54efedb11a88b02" dependencies = [ "anyhow", "ark-serialize", diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 6a87bfc9c..9003408cf 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -1,6 +1,7 @@ -use std::{fs::File, io::stdout, path::PathBuf}; +use std::{fs::File, io::stdout, path::PathBuf, time::Duration}; use clap::Parser; +use espresso_types::parse_duration; use ethers::types::Address; use futures::FutureExt; use hotshot_stake_table::config::STAKE_TABLE_CAPACITY; @@ -38,6 +39,15 @@ struct Options { )] rpc_url: Url, + /// Request rate when polling L1. + #[clap( + long, + env = "ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration, + )] + pub l1_polling_interval: Duration, + /// URL of a sequencer node that is currently providing the HotShot config. /// This is used to initialize the stake table. 
#[clap( @@ -128,6 +138,7 @@ async fn main() -> anyhow::Result<()> { let contracts = deploy( opt.rpc_url, + opt.l1_polling_interval, opt.mnemonic, opt.account_index, opt.multisig_address, diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 18e8ecfa4..99b83c69d 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -2,7 +2,7 @@ use anyhow::{bail, ensure, Context}; use clap::{Parser, Subcommand}; use client::SequencerClient; use contract_bindings::fee_contract::FeeContract; -use espresso_types::{eth_signature_key::EthKeyPair, Header}; +use espresso_types::{eth_signature_key::EthKeyPair, parse_duration, Header}; use ethers::{ middleware::{Middleware, SignerMiddleware}, providers::Provider, @@ -10,7 +10,7 @@ use ethers::{ }; use futures::stream::StreamExt; use sequencer_utils::logging; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; use surf_disco::Url; /// Command-line utility for working with the Espresso bridge. @@ -37,6 +37,21 @@ struct Deposit { #[clap(short, long, env = "L1_PROVIDER")] rpc_url: Url, + /// Request rate when polling L1. + #[clap( + short, + long, + env = "L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration + )] + l1_interval: Duration, + + /// Whether to require L1 finality to consider deposit + /// finalized on Espresso. + #[clap(short = 'f', long, env = "REQUIRE_L1_FINALITY", default_value = "true")] + require_l1_finality: bool, + /// Espresso query service provider. /// /// This must point to an Espresso node running the /availability, /node and Merklized state @@ -106,6 +121,16 @@ struct L1Balance { #[clap(short, long, env = "L1_PROVIDER")] rpc_url: Url, + /// Request rate when polling L1. + #[clap( + short, + long, + env = "L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration + )] + l1_interval: Duration, + /// Account to check. #[clap(short, long, env = "ADDRESS", required_unless_present = "mnemonic")] address: Option
, @@ -134,7 +159,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { let key_pair = EthKeyPair::from_mnemonic(opt.mnemonic, opt.account_index)?; // Connect to L1. - let rpc = Provider::try_from(opt.rpc_url.to_string())?; + let rpc = Provider::try_from(opt.rpc_url.to_string())?.interval(opt.l1_interval); let signer = key_pair.signer(); let l1 = Arc::new(SignerMiddleware::new_with_provider_chain(rpc, signer).await?); let contract = FeeContract::new(opt.contract_address, l1.clone()); @@ -195,10 +220,16 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { continue; } }; - let Some(l1_finalized) = header.l1_finalized() else { - continue; + let l1_finalized = if opt.require_l1_finality { + if let Some(l1_finalized) = header.l1_finalized() { + l1_finalized.number + } else { + continue; + } + } else { + header.l1_head() }; - if l1_finalized.number >= l1_block { + if l1_finalized >= l1_block { tracing::info!(block = header.height(), "deposit finalized on Espresso"); break header.height(); } else { @@ -258,7 +289,7 @@ async fn l1_balance(opt: L1Balance) -> anyhow::Result<()> { bail!("address or mnemonic must be provided"); }; - let l1 = Provider::try_from(opt.rpc_url.to_string())?; + let l1 = Provider::try_from(opt.rpc_url.to_string())?.interval(opt.l1_interval); let block = opt.block.map(BlockId::from); tracing::debug!(%address, ?block, "fetching L1 balance"); diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index b7cd9b7fe..ae9399648 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -42,6 +42,17 @@ struct Args { /// If this is not provided, an Avil node will be launched automatically. #[clap(short, long, env = "ESPRESSO_SEQUENCER_L1_PROVIDER")] rpc_url: Option, + + /// Request rate when polling L1. + #[clap( + short, + long, + env = "ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL", + default_value = "7s", + value_parser = parse_duration + )] + l1_interval: Duration, + /// Mnemonic for an L1 wallet. /// /// This wallet is used to deploy the contracts, @@ -165,6 +176,7 @@ async fn main() -> anyhow::Result<()> { retry_interval, alt_prover_retry_intervals, alt_prover_update_intervals, + l1_interval, } = cli_params; logging.init(); @@ -262,6 +274,7 @@ async fn main() -> anyhow::Result<()> { let contracts = deploy( url.clone(), + l1_interval, mnemonic.clone(), account_index, multisig_address, @@ -273,7 +286,9 @@ async fn main() -> anyhow::Result<()> { ) .await?; - let provider = Provider::::try_from(url.as_str()).unwrap(); + let provider = Provider::::try_from(url.as_str()) + .unwrap() + .interval(l1_interval); let chain_id = provider.get_chainid().await.unwrap().as_u64(); let wallet = MnemonicBuilder::::default() diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 1577a2a38..f410858ea 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -673,7 +673,7 @@ impl Header { &mut *field_mut!(self.l1_head) } - /// The Espresso block header includes information a bout the latest finalized L1 block. + /// The Espresso block header includes information about the latest finalized L1 block. 
/// /// Similar to [`l1_head`](Self::l1_head), rollups can use this information to implement a /// bridge between the L1 and L2 while retaining the finality of low-latency block confirmations diff --git a/utils/src/deployer.rs b/utils/src/deployer.rs index ce95f8ba2..bbdd2f6f6 100644 --- a/utils/src/deployer.rs +++ b/utils/src/deployer.rs @@ -18,8 +18,7 @@ use futures::future::{BoxFuture, FutureExt}; use hotshot_contract_adapter::light_client::{ LightClientConstructorArgs, ParsedLightClientState, ParsedStakeTableState, }; -use std::sync::Arc; -use std::{collections::HashMap, io::Write, ops::Deref}; +use std::{collections::HashMap, io::Write, ops::Deref, sync::Arc, time::Duration}; use url::Url; /// Set of predeployed contracts. @@ -300,6 +299,7 @@ pub async fn deploy_mock_light_client_contract( #[allow(clippy::too_many_arguments)] pub async fn deploy( l1url: Url, + l1_interval: Duration, mnemonic: String, account_index: u32, multisig_address: Option, @@ -309,7 +309,7 @@ pub async fn deploy( permissioned_prover: Option
, mut contracts: Contracts, ) -> anyhow::Result { - let provider = Provider::::try_from(l1url.to_string())?; + let provider = Provider::::try_from(l1url.to_string())?.interval(l1_interval); let chain_id = provider.get_chainid().await?.as_u64(); let wallet = MnemonicBuilder::::default() .phrase(mnemonic.as_str()) From 79fcacb1a866307533fcc72021da1ee59d2de962 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 9 Dec 2024 18:38:01 +0100 Subject: [PATCH 2/5] Rollback l1_finalized changes --- docker-compose.yaml | 1 - process-compose.yaml | 1 - sequencer/src/bin/espresso-bridge.rs | 17 +++-------------- 3 files changed, 3 insertions(+), 16 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index c369bff17..dcfbee706 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -60,7 +60,6 @@ services: environment: - L1_PROVIDER=$ESPRESSO_SEQUENCER_L1_PROVIDER - L1_POLLING_INTERVAL=$ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - - REQUIRE_L1_FINALITY=false - ESPRESSO_PROVIDER=http://sequencer1:$ESPRESSO_SEQUENCER_API_PORT - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC diff --git a/process-compose.yaml b/process-compose.yaml index a1950c504..a19c1f7b0 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -55,7 +55,6 @@ processes: environment: - L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT - L1_POLLING_INTERVAL=$ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL - - REQUIRE_L1_FINALITY=false - ESPRESSO_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT - CONTRACT_ADDRESS=0xa15bb66138824a1c7167f5e85b957d04dd34e468 - MNEMONIC=$ESPRESSO_BUILDER_ETH_MNEMONIC diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 99b83c69d..2ae2c0263 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -47,11 +47,6 @@ struct Deposit { )] l1_interval: Duration, - /// Whether to require L1 finality to consider deposit - /// finalized on Espresso. - #[clap(short = 'f', long, env = "REQUIRE_L1_FINALITY", default_value = "true")] - require_l1_finality: bool, - /// Espresso query service provider. /// /// This must point to an Espresso node running the /availability, /node and Merklized state @@ -220,16 +215,10 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { continue; } }; - let l1_finalized = if opt.require_l1_finality { - if let Some(l1_finalized) = header.l1_finalized() { - l1_finalized.number - } else { - continue; - } - } else { - header.l1_head() + let Some(l1_finalized) = header.l1_finalized() else { + continue; }; - if l1_finalized >= l1_block { + if l1_finalized.number() >= l1_block { tracing::info!(block = header.height(), "deposit finalized on Espresso"); break header.height(); } else { From 8cd4f976e366c528c8ba6e39a31522dbd70e0344 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 9 Dec 2024 13:08:26 -0800 Subject: [PATCH 3/5] Fix deadlock and race condition in proposal fetching (#2379) * Fix deadlock and race condition in proposal fetching * Change from broadcast channel to multi-consumer channel. This means only one fetcher task will receive each proposal to be fetched, which is the actual behavior we want. Before, with broadcast, we had multiple fetchers always fetching the same proposal, which is why we saw race conditions causing database serialization errors. It should now be possible to reenable multiple workers. * Use an unbounded channel. This prevents a deadlock where a consumer sends back into the channel (e.g. 
to recursively fetch the parent of the proposal it had just fetched), but the channel is full, blocking the consumer, the very task responsible for clearing the blockage. * Add metrics for proposal fetcher --- Cargo.lock | 2 +- Cargo.toml | 1 + justfile | 14 +- sequencer-sqlite/Cargo.lock | 2 +- sequencer/Cargo.toml | 2 +- sequencer/src/context.rs | 191 ++--------------------- sequencer/src/lib.rs | 4 +- sequencer/src/options.rs | 2 +- sequencer/src/proposal_fetcher.rs | 244 ++++++++++++++++++++++++++++++ 9 files changed, 275 insertions(+), 187 deletions(-) create mode 100644 sequencer/src/proposal_fetcher.rs diff --git a/Cargo.lock b/Cargo.lock index 727655059..9a06129d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8595,7 +8595,7 @@ dependencies = [ "anyhow", "ark-ff", "ark-serialize", - "async-broadcast", + "async-channel 2.3.1", "async-lock 3.4.0", "async-once-cell", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 011701805..12738fd66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ ark-poly = "0.4" ark-serialize = "0.4" ark-srs = "0.3.1" async-broadcast = "0.7.0" +async-channel = "2" async-lock = "3" async-once-cell = "0.5" async-trait = "0.1" diff --git a/justfile b/justfile index 5d7521a92..f2badd3a5 100644 --- a/justfile +++ b/justfile @@ -10,13 +10,21 @@ demo *args: demo-native *args: build scripts/demo-native {{args}} -build: +lint: + #!/usr/bin/env bash + set -euxo pipefail + # Use the same target dir for both `clippy` invocations + export CARGO_TARGET_DIR=${CARGO_TARGET_DIR:-target} + cargo clippy --workspace --features testing --all-targets -- -D warnings + cargo clippy --workspace --all-targets --manifest-path sequencer-sqlite/Cargo.toml -- -D warnings + +build profile="test": #!/usr/bin/env bash set -euxo pipefail # Use the same target dir for both `build` invocations export CARGO_TARGET_DIR=${CARGO_TARGET_DIR:-target} - cargo build --profile test - cargo build --profile test --manifest-path ./sequencer-sqlite/Cargo.toml + cargo build --profile {{profile}} + cargo build --profile {{profile}} --manifest-path ./sequencer-sqlite/Cargo.toml demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index 1b0040642..b2f2e85e6 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -8319,7 +8319,7 @@ dependencies = [ "anyhow", "ark-ff", "ark-serialize", - "async-broadcast", + "async-channel 2.3.1", "async-lock 3.4.0", "async-once-cell", "async-trait", diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 5fb19d8f6..f9948fc79 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -43,7 +43,7 @@ vergen = { workspace = true } anyhow = { workspace = true } ark-ff = { workspace = true } ark-serialize = { workspace = true, features = ["derive"] } -async-broadcast = { workspace = true } +async-channel = { workspace = true } async-lock = { workspace = true } async-once-cell = { workspace = true } async-trait = { workspace = true } diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 06e27e3b1..d0cbfe61e 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -1,13 +1,9 @@ use std::{fmt::Display, sync::Arc}; use anyhow::Context; -use async_broadcast::{broadcast, Receiver, Sender}; use async_lock::RwLock; -use clap::Parser; -use committable::Commitment; use derivative::Derivative; use espresso_types::{ - parse_duration, v0::traits::{EventConsumer as PersistenceEventConsumer, 
SequencerPersistence}, NodeState, PubKey, Transaction, ValidatedState, }; @@ -22,32 +18,26 @@ use hotshot::{ }; use hotshot_events_service::events_source::{EventConsumer, EventsStreamer}; use parking_lot::Mutex; -use tokio::{ - spawn, - task::JoinHandle, - time::{sleep, timeout}, -}; +use tokio::{spawn, task::JoinHandle}; use hotshot_orchestrator::client::OrchestratorClient; use hotshot_types::{ consensus::ConsensusMetricsValue, - data::{EpochNumber, Leaf2, ViewNumber}, + data::{Leaf2, ViewNumber}, network::NetworkConfig, traits::{ metrics::Metrics, network::ConnectedNetwork, - node_implementation::{ConsensusTime, NodeType, Versions}, - ValidatedState as _, + node_implementation::{NodeType, Versions}, }, - utils::{View, ViewInner}, PeerConfig, ValidatorConfig, }; -use std::time::Duration; use tracing::{Instrument, Level}; use url::Url; use crate::{ external_event_handler::{self, ExternalEventHandler}, + proposal_fetcher::ProposalFetcherConfig, state_signature::StateSigner, static_stake_table_commitment, Node, SeqTypes, SequencerApiVersion, }; @@ -55,37 +45,6 @@ use crate::{ /// The consensus handle pub type Consensus = SystemContextHandle, V>; -#[derive(Clone, Copy, Debug, Parser)] -pub struct ProposalFetcherConfig { - #[clap( - long = "proposal-fetcher-num-workers", - env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_NUM_WORKERS", - default_value = "2" - )] - pub num_workers: usize, - - #[clap( - long = "proposal-fetcher-channel-capacity", - env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_CHANNEL_CAPACITY", - default_value = "100" - )] - pub channel_capacity: usize, - - #[clap( - long = "proposal-fetcher-fetch-timeout", - env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_FETCH_TIMEOUT", - default_value = "2s", - value_parser = parse_duration, - )] - pub fetch_timeout: Duration, -} - -impl Default for ProposalFetcherConfig { - fn default() -> Self { - Self::parse_from(std::iter::empty::()) - } -} - /// The sequencer context contains a consensus handle and other sequencer specific information. #[derive(Derivative, Clone)] #[derivative(Debug(bound = ""))] @@ -210,6 +169,7 @@ impl, P: SequencerPersistence, V: Versions> Sequence event_consumer, anchor_view, proposal_fetcher_cfg, + metrics, ) .with_task_list(tasks)) } @@ -228,6 +188,7 @@ impl, P: SequencerPersistence, V: Versions> Sequence event_consumer: impl PersistenceEventConsumer + 'static, anchor_view: Option, proposal_fetcher_cfg: ProposalFetcherConfig, + metrics: &dyn Metrics, ) -> Self { let events = handle.event_stream(); @@ -245,19 +206,12 @@ impl, P: SequencerPersistence, V: Versions> Sequence }; // Spawn proposal fetching tasks. - let (send, recv) = broadcast(proposal_fetcher_cfg.channel_capacity); - ctx.spawn("proposal scanner", scan_proposals(ctx.handle.clone(), send)); - for i in 0..proposal_fetcher_cfg.num_workers { - ctx.spawn( - format!("proposal fetcher {i}"), - fetch_proposals( - ctx.handle.clone(), - persistence.clone(), - recv.clone(), - proposal_fetcher_cfg.fetch_timeout, - ), - ); - } + proposal_fetcher_cfg.spawn( + &mut ctx.tasks, + ctx.handle.clone(), + persistence.clone(), + metrics, + ); // Spawn event handling loop. ctx.spawn( @@ -475,127 +429,6 @@ async fn handle_events( } } -#[tracing::instrument(skip_all)] -async fn scan_proposals( - consensus: Arc>>, - fetcher: Sender<(ViewNumber, Commitment>)>, -) where - N: ConnectedNetwork, - P: SequencerPersistence, - V: Versions, -{ - let mut events = consensus.read().await.event_stream(); - while let Some(event) = events.next().await { - let EventType::QuorumProposal { proposal, .. 
} = event.event else { - continue; - }; - // Whenever we see a quorum proposal, ensure we have the chain of proposals stretching back - // to the anchor. This allows state replay from the decided state. - let parent_view = proposal.data.justify_qc.view_number; - let parent_leaf = proposal.data.justify_qc.data.leaf_commit; - fetcher - .broadcast_direct((parent_view, parent_leaf)) - .await - .ok(); - } -} - -#[tracing::instrument(skip_all)] -async fn fetch_proposals( - consensus: Arc>>, - persistence: Arc, - mut scanner: Receiver<(ViewNumber, Commitment>)>, - fetch_timeout: Duration, -) where - N: ConnectedNetwork, - P: SequencerPersistence, - V: Versions, -{ - let sender = scanner.new_sender(); - while let Some((view, leaf)) = scanner.next().await { - let span = tracing::warn_span!("fetch proposal", ?view, %leaf); - let res: anyhow::Result<()> = async { - let anchor_view = load_anchor_view(&*persistence).await; - if view <= anchor_view { - tracing::debug!(?anchor_view, "skipping already-decided proposal"); - return Ok(()); - } - - match persistence.load_quorum_proposal(view).await { - Ok(proposal) => { - // If we already have the proposal in storage, keep traversing the chain to its - // parent. - let view = proposal.data.justify_qc.view_number; - let leaf = proposal.data.justify_qc.data.leaf_commit; - sender.broadcast_direct((view, leaf)).await.ok(); - return Ok(()); - } - Err(err) => { - tracing::info!("proposal missing from storage; fetching from network: {err:#}"); - } - } - - let future = - consensus - .read() - .await - .request_proposal(view, EpochNumber::genesis(), leaf)?; - let proposal = timeout(fetch_timeout, future) - .await - .context("timed out fetching proposal")? - .context("error fetching proposal")?; - persistence - .append_quorum_proposal(&proposal) - .await - .context("error saving fetched proposal")?; - - // Add the fetched leaf to HotShot state, so consensus can make use of it. - let leaf = Leaf2::from_quorum_proposal(&proposal.data); - let handle = consensus.read().await; - let consensus = handle.consensus(); - let mut consensus = consensus.write().await; - if matches!( - consensus.validated_state_map().get(&view), - None | Some(View { - // Replace a Da-only view with a Leaf view, which has strictly more information. - view_inner: ViewInner::Da { .. } - }) - ) { - let state = Arc::new(ValidatedState::from_header(leaf.block_header())); - if let Err(err) = consensus.update_leaf(leaf, state, None) { - tracing::warn!("unable to update leaf: {err:#}"); - } - } - - Ok(()) - } - .instrument(span) - .await; - if let Err(err) = res { - tracing::warn!("failed to fetch proposal: {err:#}"); - - // Avoid busy loop when operations are failing. - sleep(Duration::from_secs(1)).await; - - // If we fail fetching the proposal, don't let it clog up the fetching task. Just push - // it back onto the queue and move onto the next proposal. 
- sender.broadcast_direct((view, leaf)).await.ok(); - } - } -} - -async fn load_anchor_view(persistence: &impl SequencerPersistence) -> ViewNumber { - loop { - match persistence.load_anchor_view().await { - Ok(view) => break view, - Err(err) => { - tracing::warn!("error loading anchor view: {err:#}"); - sleep(Duration::from_secs(1)).await; - } - } - } -} - #[derive(Debug, Default, Clone)] #[allow(clippy::type_complexity)] pub(crate) struct TaskList(Arc)>>>); diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 98fc95893..b4d2a4e5e 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -2,6 +2,7 @@ pub mod api; pub mod catchup; pub mod context; pub mod genesis; +mod proposal_fetcher; mod external_event_handler; pub mod options; @@ -13,7 +14,7 @@ mod message_compat_tests; use anyhow::Context; use catchup::StatePeers; -use context::{ProposalFetcherConfig, SequencerContext}; +use context::SequencerContext; use espresso_types::{ traits::EventConsumer, BackoffParams, L1ClientOptions, NodeState, PubKey, SeqTypes, SolverAuctionResultsProvider, ValidatedState, @@ -21,6 +22,7 @@ use espresso_types::{ use genesis::L1Finalized; use hotshot::traits::election::static_committee::StaticCommittee; use hotshot_types::traits::election::Membership; +use proposal_fetcher::ProposalFetcherConfig; use std::sync::Arc; use tokio::select; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 9882283d3..b7fabe1d7 100644 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -21,7 +21,7 @@ use hotshot_types::{light_client::StateSignKey, signature_key::BLSPrivKey}; use libp2p::Multiaddr; use url::Url; -use crate::{api, context::ProposalFetcherConfig, persistence}; +use crate::{api, persistence, proposal_fetcher::ProposalFetcherConfig}; // This options struct is a bit unconventional. The sequencer has multiple optional modules which // can be added, in any combination, to the service. These include, for example, the API server. 
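The new `proposal_fetcher` module below replaces the old `async-broadcast` channel with an `async-channel` MPMC queue: each request is delivered to exactly one worker rather than to every worker, and the queue is unbounded so a worker that re-enqueues a parent proposal can never block on a full channel. A minimal sketch of that pattern (illustrative only, not the repo's code):

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Unbounded MPMC queue: sends never block, so a worker that pushes
    // follow-up work back onto the same queue cannot deadlock itself.
    let (tx, rx) = async_channel::unbounded::<u64>();

    // Two workers share one receiver. Each request goes to exactly one of
    // them (a broadcast channel would instead hand every request to both,
    // causing duplicate fetches and write races).
    for id in 0..2u32 {
        let rx = rx.clone();
        let tx = tx.clone();
        tokio::spawn(async move {
            while let Ok(view) = rx.recv().await {
                println!("worker {id}: fetching proposal for view {view}");
                if view > 0 {
                    // Recursively request the parent view; with an unbounded
                    // queue this send can never block the worker.
                    tx.send(view - 1).await.ok();
                }
            }
        });
    }

    tx.send(3).await.unwrap();
    tokio::time::sleep(Duration::from_millis(100)).await; // let the demo drain
}
```

With a bounded channel, the `tx.send` inside the worker could fill the queue and suspend the very task responsible for draining it, which is the deadlock the commit message describes.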
diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs new file mode 100644 index 000000000..5c753ece8 --- /dev/null +++ b/sequencer/src/proposal_fetcher.rs @@ -0,0 +1,244 @@ +use std::sync::Arc; + +use anyhow::Context; +use async_channel::{Receiver, Sender}; +use async_lock::RwLock; +use clap::Parser; +use committable::Commitment; +use derivative::Derivative; +use espresso_types::{parse_duration, v0::traits::SequencerPersistence, PubKey, ValidatedState}; +use futures::stream::StreamExt; +use hotshot::types::EventType; +use hotshot_types::{ + data::{EpochNumber, Leaf2, ViewNumber}, + traits::{ + metrics::{Counter, Gauge, Metrics}, + network::ConnectedNetwork, + node_implementation::{ConsensusTime, Versions}, + ValidatedState as _, + }, + utils::{View, ViewInner}, +}; +use std::time::Duration; +use tokio::time::{sleep, timeout}; +use tracing::Instrument; + +use crate::{ + context::{Consensus, TaskList}, + SeqTypes, +}; + +#[derive(Clone, Copy, Debug, Parser)] +pub struct ProposalFetcherConfig { + #[clap( + long = "proposal-fetcher-num-workers", + env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_NUM_WORKERS", + default_value = "2" + )] + pub num_workers: usize, + + #[clap( + long = "proposal-fetcher-fetch-timeout", + env = "ESPRESSO_SEQUENCER_PROPOSAL_FETCHER_FETCH_TIMEOUT", + default_value = "2s", + value_parser = parse_duration, + )] + pub fetch_timeout: Duration, +} + +impl Default for ProposalFetcherConfig { + fn default() -> Self { + Self::parse_from(std::iter::empty::()) + } +} + +impl ProposalFetcherConfig { + pub(crate) fn spawn( + self, + tasks: &mut TaskList, + consensus: Arc>>, + persistence: Arc
<P>
, + metrics: &(impl Metrics + ?Sized), + ) where + N: ConnectedNetwork, + P: SequencerPersistence, + V: Versions, + { + let (sender, receiver) = async_channel::unbounded(); + let fetcher = ProposalFetcher { + sender, + consensus, + persistence, + cfg: self, + metrics: ProposalFetcherMetrics::new(metrics), + }; + + tasks.spawn("proposal scanner", fetcher.clone().scan()); + for i in 0..self.num_workers { + tasks.spawn( + format!("proposal fetcher {i}"), + fetcher.clone().fetch(receiver.clone()), + ); + } + } +} + +#[derive(Clone, Debug)] +struct ProposalFetcherMetrics { + fetched: Arc, + failed: Arc, + queue_len: Arc, + last_seen: Arc, + last_fetched: Arc, +} + +impl ProposalFetcherMetrics { + fn new(metrics: &(impl Metrics + ?Sized)) -> Self { + let metrics = metrics.subgroup("proposal_fetcher".into()); + Self { + fetched: metrics.create_counter("fetched".into(), None).into(), + failed: metrics.create_counter("failed".into(), None).into(), + queue_len: metrics.create_gauge("queue_len".into(), None).into(), + last_seen: metrics + .create_gauge("last_seen".into(), Some("view".into())) + .into(), + last_fetched: metrics + .create_gauge("last_fetched".into(), Some("view".into())) + .into(), + } + } +} + +type Request = (ViewNumber, Commitment>); + +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +struct ProposalFetcher +where + N: ConnectedNetwork, + P: SequencerPersistence, + V: Versions, +{ + sender: Sender, + #[derivative(Debug = "ignore")] + consensus: Arc>>, + #[derivative(Debug = "ignore")] + persistence: Arc
<P>
, + cfg: ProposalFetcherConfig, + metrics: ProposalFetcherMetrics, +} + +impl ProposalFetcher +where + N: ConnectedNetwork, + P: SequencerPersistence, + V: Versions, +{ + #[tracing::instrument(skip_all)] + async fn scan(self) { + let mut events = self.consensus.read().await.event_stream(); + while let Some(event) = events.next().await { + let EventType::QuorumProposal { proposal, .. } = event.event else { + continue; + }; + // Whenever we see a quorum proposal, ensure we have the chain of proposals stretching back + // to the anchor. This allows state replay from the decided state. + let parent_view = proposal.data.justify_qc.view_number; + let parent_leaf = proposal.data.justify_qc.data.leaf_commit; + self.request((parent_view, parent_leaf)).await; + } + } + + #[tracing::instrument(skip_all)] + async fn fetch(self, receiver: Receiver<(ViewNumber, Commitment>)>) { + let mut receiver = std::pin::pin!(receiver); + while let Some(req) = receiver.next().await { + self.fetch_request(req).await; + } + } + + async fn request(&self, req: Request) { + self.sender.send(req).await.ok(); + self.metrics.queue_len.set(self.sender.len()); + self.metrics.last_seen.set(req.0.u64() as usize); + } + + async fn fetch_request(&self, (view, leaf): Request) { + let span = tracing::warn_span!("fetch proposal", ?view, %leaf); + let res: anyhow::Result<()> = async { + let anchor_view = self + .persistence + .load_anchor_view() + .await + .context("loading anchor view")?; + if view <= anchor_view { + tracing::debug!(?anchor_view, "skipping already-decided proposal"); + return Ok(()); + } + + match self.persistence.load_quorum_proposal(view).await { + Ok(proposal) => { + // If we already have the proposal in storage, keep traversing the chain to its + // parent. + let view = proposal.data.justify_qc.view_number; + let leaf = proposal.data.justify_qc.data.leaf_commit; + self.request((view, leaf)).await; + return Ok(()); + } + Err(err) => { + tracing::info!("proposal missing from storage; fetching from network: {err:#}"); + } + } + + let future = + self.consensus + .read() + .await + .request_proposal(view, EpochNumber::genesis(), leaf)?; + let proposal = timeout(self.cfg.fetch_timeout, future) + .await + .context("timed out fetching proposal")? + .context("error fetching proposal")?; + self.persistence + .append_quorum_proposal(&proposal) + .await + .context("error saving fetched proposal")?; + + // Add the fetched leaf to HotShot state, so consensus can make use of it. + let leaf = Leaf2::from_quorum_proposal(&proposal.data); + let handle = self.consensus.read().await; + let consensus = handle.consensus(); + let mut consensus = consensus.write().await; + if matches!( + consensus.validated_state_map().get(&view), + None | Some(View { + // Replace a Da-only view with a Leaf view, which has strictly more information. + view_inner: ViewInner::Da { .. } + }) + ) { + let state = Arc::new(ValidatedState::from_header(leaf.block_header())); + if let Err(err) = consensus.update_leaf(leaf, state, None) { + tracing::warn!("unable to update leaf: {err:#}"); + } + } + + self.metrics.last_fetched.set(view.u64() as usize); + self.metrics.fetched.add(1); + + Ok(()) + } + .instrument(span) + .await; + if let Err(err) = res { + tracing::warn!("failed to fetch proposal: {err:#}"); + self.metrics.failed.add(1); + + // Avoid busy loop when operations are failing. + sleep(Duration::from_secs(1)).await; + + // If we fail fetching the proposal, don't let it clog up the fetching task. 
Just push + // it back onto the queue and move onto the next proposal. + self.request((view, leaf)).await; + } + } +} From c52924eaece3eab34b79bd272a248a695eb7b1c2 Mon Sep 17 00:00:00 2001 From: Jeb Bearer Date: Mon, 9 Dec 2024 17:09:50 -0800 Subject: [PATCH 4/5] Improve catchup (#2375) * Limit time for each catchup request to defend against malicious peers * Rank catchup peers by reliability; try most reliable peers first * Add metrics exposing a node's idea of its peers' catchup reliability * cargo sort --- Cargo.lock | 12 + Cargo.toml | 1 + builder/src/non_permissioned.rs | 2 + marketplace-builder/src/builder.rs | 2 + sequencer-sqlite/Cargo.lock | 12 + sequencer/Cargo.toml | 1 + sequencer/src/api.rs | 16 +- sequencer/src/catchup.rs | 337 ++++++++++++++++++--------- sequencer/src/lib.rs | 10 +- types/src/v0/impls/instance_state.rs | 3 + types/src/v0/traits.rs | 73 ++++-- types/src/v0/utils.rs | 9 +- 12 files changed, 348 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a06129d8..6cde26b5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7413,6 +7413,17 @@ dependencies = [ "uint", ] +[[package]] +name = "priority-queue" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714c75db297bc88a63783ffc6ab9f830698a6705aa0201416931759ef4c8183d" +dependencies = [ + "autocfg", + "equivalent", + "indexmap 2.6.0", +] + [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -8642,6 +8653,7 @@ dependencies = [ "parking_lot", "portpicker", "pretty_assertions", + "priority-queue", "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", diff --git a/Cargo.toml b/Cargo.toml index 12738fd66..c52556dc4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -126,6 +126,7 @@ thiserror = "1.0.69" tracing = "0.1" bytesize = "1.3" itertools = "0.12" +priority-queue = "2" rand_chacha = "0.3" rand_distr = "0.4" reqwest = "0.12" diff --git a/builder/src/non_permissioned.rs b/builder/src/non_permissioned.rs index ab33c1fb3..e07509d6d 100644 --- a/builder/src/non_permissioned.rs +++ b/builder/src/non_permissioned.rs @@ -19,6 +19,7 @@ use hotshot_types::{ data::{fake_commitment, ViewNumber}, traits::{ block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, + metrics::NoMetrics, node_implementation::Versions, EncodeBytes, }, @@ -53,6 +54,7 @@ pub async fn build_instance_state( Arc::new(StatePeers::::from_urls( state_peers, Default::default(), + &NoMetrics, )), V::Base::VERSION, ); diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs index 46a2005f5..f0daf5e31 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -30,6 +30,7 @@ use hotshot_types::{ data::{fake_commitment, Leaf, ViewNumber}, traits::{ block_contents::{vid_commitment, Transaction as _, GENESIS_VID_NUM_STORAGE_NODES}, + metrics::NoMetrics, node_implementation::{ConsensusTime, NodeType, Versions}, EncodeBytes, }, @@ -74,6 +75,7 @@ pub async fn build_instance_state( Arc::new(StatePeers::::from_urls( state_peers, Default::default(), + &NoMetrics, )), V::Base::version(), ); diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index b2f2e85e6..ca6f27327 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -7139,6 +7139,17 @@ dependencies = [ "uint", ] +[[package]] +name = "priority-queue" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "714c75db297bc88a63783ffc6ab9f830698a6705aa0201416931759ef4c8183d" +dependencies = [ + "autocfg", + 
"equivalent", + "indexmap 2.7.0", +] + [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -8359,6 +8370,7 @@ dependencies = [ "num_enum", "parking_lot", "portpicker", + "priority-queue", "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index f9948fc79..b95b182af 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -95,6 +95,7 @@ marketplace-solver = { path = "../marketplace-solver" } num_enum = "0.7" parking_lot = "0.12" portpicker = { workspace = true } +priority-queue = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rand_distr = { workspace = true } diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 12f8191e2..a3cd5c593 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1527,6 +1527,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .build(); @@ -1571,6 +1572,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ), &NoMetrics, test_helpers::STAKE_TABLE_CAPACITY_FOR_TEST, @@ -1636,6 +1638,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config(TestConfigBuilder::default().l1_url(l1).build()) @@ -1713,6 +1716,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config(TestConfigBuilder::default().l1_url(l1).build()) @@ -1773,6 +1777,7 @@ mod test { StatePeers::::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], BackoffParams::default(), + &NoMetrics, ) }); @@ -1780,6 +1785,7 @@ mod test { peers[2] = StatePeers::::from_urls( vec![url.clone()], BackoffParams::default(), + &NoMetrics, ); let config = TestNetworkConfigBuilder::::with_num_nodes() @@ -1801,13 +1807,16 @@ mod test { // The catchup should successfully retrieve the correct chain config. let node = &network.peers[0]; let peers = node.node_state().peers; - peers.try_fetch_chain_config(cf.commit()).await.unwrap(); + peers.try_fetch_chain_config(0, cf.commit()).await.unwrap(); // Test a catchup request for node #1, which is connected to a dishonest peer. // This request will result in an error due to the malicious chain config provided by the peer. let node = &network.peers[1]; let peers = node.node_state().peers; - peers.try_fetch_chain_config(cf.commit()).await.unwrap_err(); + peers + .try_fetch_chain_config(0, cf.commit()) + .await + .unwrap_err(); network.server.shut_down().await; handle.abort(); @@ -1963,6 +1972,7 @@ mod test { StatePeers::::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config( @@ -2136,6 +2146,7 @@ mod test { StatePeers::>::from_urls( vec![format!("http://localhost:{port}").parse().unwrap()], Default::default(), + &NoMetrics, ) })) .network_config(TestConfigBuilder::default().l1_url(l1).build()) @@ -2200,6 +2211,7 @@ mod test { let peers = StatePeers::>::from_urls( vec!["https://notarealnode.network".parse().unwrap(), url], Default::default(), + &NoMetrics, ); // Fetch the config from node 1, a different node than the one running the service. 
diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index ccfe74322..8c8ca7a66 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use anyhow::{bail, Context}; +use anyhow::{anyhow, bail, ensure, Context}; +use async_lock::RwLock; use async_trait::async_trait; use committable::Commitment; use committable::Committable; @@ -9,17 +10,24 @@ use espresso_types::{ v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState, }; -use futures::future::{Future, FutureExt}; +use futures::future::{Future, FutureExt, TryFuture, TryFutureExt}; use hotshot_types::{ - data::ViewNumber, network::NetworkConfig, traits::node_implementation::ConsensusTime as _, + data::ViewNumber, + network::NetworkConfig, + traits::{ + metrics::{Counter, CounterFamily, Metrics}, + node_implementation::ConsensusTime as _, + }, ValidatorConfig, }; use itertools::Itertools; use jf_merkle_tree::{prelude::MerkleNode, ForgetableMerkleTreeScheme, MerkleTreeScheme}; +use priority_queue::PriorityQueue; use serde::de::DeserializeOwned; -use std::collections::HashMap; +use std::{cmp::Ordering, collections::HashMap, fmt::Display, time::Duration}; use surf_disco::Request; use tide_disco::error::ServerError; +use tokio::time::timeout; use url::Url; use vbs::version::StaticVersionType; @@ -34,12 +42,20 @@ use crate::{ struct Client { inner: surf_disco::Client, url: Url, + requests: Arc>, + failures: Arc>, } impl Client { - pub fn new(url: Url) -> Self { + pub fn new( + url: Url, + requests: &(impl CounterFamily + ?Sized), + failures: &(impl CounterFamily + ?Sized), + ) -> Self { Self { inner: surf_disco::Client::new(url.clone()), + requests: Arc::new(requests.create(vec![url.to_string()])), + failures: Arc::new(failures.create(vec![url.to_string()])), url, } } @@ -64,49 +80,165 @@ pub(crate) async fn local_and_remote( } } +/// A score of a catchup peer, based on our interactions with that peer. +/// +/// The score accounts for malicious peers -- i.e. peers that gave us an invalid response to a +/// verifiable request -- and faulty/unreliable peers -- those that fail to respond to requests at +/// all. The score has a comparison function where higher is better, or in other words `p1 > p2` +/// means we believe we are more likely to successfully catch up using `p1` than `p2`. This makes it +/// convenient and efficient to collect peers in a priority queue which we can easily convert to a +/// list sorted by reliability. +#[derive(Clone, Copy, Debug, Default)] +struct PeerScore { + requests: usize, + failures: usize, +} + +impl Ord for PeerScore { + fn cmp(&self, other: &Self) -> Ordering { + // Compare failure rates: `self` is better than `other` if + // self.failures / self.requests < other.failures / other.requests + // or equivalently + // other.failures * self.requests > self.failures * other.requests + (other.failures * self.requests).cmp(&(self.failures * other.requests)) + } +} + +impl PartialOrd for PeerScore { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for PeerScore { + fn eq(&self, other: &Self) -> bool { + self.cmp(other).is_eq() + } +} + +impl Eq for PeerScore {} + #[derive(Debug, Clone, Default)] pub struct StatePeers { + // Peer IDs, ordered by reliability score. Each ID is an index into `clients`. 
+ scores: Arc>>, clients: Vec>, backoff: BackoffParams, } impl StatePeers { - pub fn from_urls(urls: Vec, backoff: BackoffParams) -> Self { + async fn fetch( + &self, + retry: usize, + f: impl Fn(Client) -> Fut, + ) -> anyhow::Result + where + Fut: TryFuture, + { + // Since we have generally have multiple peers we can catch up from, we want a fairly + // aggressive timeout for requests: if a peer is not responding quickly, we're better off + // just trying the next one rather than waiting, and this prevents a malicious peer from + // delaying catchup for a long time. + // + // However, if we set the timeout _too_ aggressively, we might fail to catch up even from an + // honest peer, and thus never make progress. Thus, we start with a timeout of 500ms, which + // is aggressive but still very reasonable for an HTTP request. If that fails with all of + // our peers, we increase the timeout by 1 second for each successive retry, until we + // eventually succeed. + let timeout_dur = Duration::from_millis(500) * (retry as u32 + 1); + + // Keep track of which peers we make requests to and which succeed (`true`) or fail (`false`), + // so we can update reliability scores at the end. + let mut requests = HashMap::new(); + let mut res = Err(anyhow!("failed fetching from every peer")); + + // Try each peer in order of reliability score, until we succeed. We clone out of + // `self.scores` because it is small (contains only numeric IDs and scores), so this clone + // is a lot cheaper than holding the read lock the entire time we are making requests (which + // could be a while). + let mut scores = { (*self.scores.read().await).clone() }; + while let Some((id, score)) = scores.pop() { + let client = &self.clients[id]; + tracing::info!("fetching from {}", client.url); + match timeout(timeout_dur, f(client.clone()).into_future()).await { + Ok(Ok(t)) => { + requests.insert(id, true); + res = Ok(t); + break; + } + Ok(Err(err)) => { + tracing::warn!(id, ?score, peer = %client.url, "error from peer: {err:#}"); + requests.insert(id, false); + } + Err(_) => { + tracing::warn!(id, ?score, peer = %client.url, ?timeout_dur, "request timed out"); + requests.insert(id, false); + } + } + } + + // Update client scores. 
+ let mut scores = self.scores.write().await; + for (id, success) in requests { + scores.change_priority_by(&id, |score| { + score.requests += 1; + self.clients[id].requests.add(1); + if !success { + score.failures += 1; + self.clients[id].failures.add(1); + } + }); + } + + res + } + + pub fn from_urls( + urls: Vec, + backoff: BackoffParams, + metrics: &(impl Metrics + ?Sized), + ) -> Self { if urls.is_empty() { panic!("Cannot create StatePeers with no peers"); } + let metrics = metrics.subgroup("catchup".into()); + let requests = metrics.counter_family("requests".into(), vec!["peer".into()]); + let failures = metrics.counter_family("request_failures".into(), vec!["peer".into()]); + + let scores = urls + .iter() + .enumerate() + .map(|(i, _)| (i, PeerScore::default())) + .collect(); + let clients = urls + .into_iter() + .map(|url| Client::new(url, &*requests, &*failures)) + .collect(); + Self { - clients: urls.into_iter().map(Client::new).collect(), + clients, + scores: Arc::new(RwLock::new(scores)), backoff, } } + #[tracing::instrument(skip(self, my_own_validator_config))] pub async fn fetch_config( &self, my_own_validator_config: ValidatorConfig, ) -> anyhow::Result> { self.backoff() - .retry(self, move |provider| { + .retry(self, move |provider, retry| { let my_own_validator_config = my_own_validator_config.clone(); async move { - for client in &provider.clients { - tracing::info!("fetching config from {}", client.url); - match client - .get::("config/hotshot") - .send() - .await - { - Ok(res) => { - return res.into_network_config(my_own_validator_config) - .context(format!("fetched config from {}, but failed to convert to private config", client.url)); - } - Err(err) => { - tracing::warn!("error fetching config from peer: {err:#}"); - } - } - } - bail!("could not fetch config from any peer"); + let cfg = provider + .fetch(retry, |client| { + client.get::("config/hotshot").send() + }) + .await?; + cfg.into_network_config(my_own_validator_config) + .context("fetched config, but failed to convert to private config") } .boxed() }) @@ -119,115 +251,82 @@ impl StateCatchup for StatePeers { #[tracing::instrument(skip(self, _instance))] async fn try_fetch_accounts( &self, + retry: usize, _instance: &NodeState, height: u64, view: ViewNumber, fee_merkle_tree_root: FeeMerkleCommitment, accounts: &[FeeAccount], ) -> anyhow::Result { - for client in self.clients.iter() { - tracing::info!("Fetching accounts from {}", client.url); - let req = match client + self.fetch(retry, |client| async move { + let snapshot = client .inner - .post::(&format!("catchup/{height}/{}/accounts", view.u64(),)) - .body_binary(&accounts.to_vec()) - { - Ok(req) => req, - Err(err) => { - tracing::warn!("failed to construct accounts catchup request: {err:#}"); - continue; - } - }; - let snapshot = match req.send().await { - Ok(res) => res, - Err(err) => { - tracing::info!(peer = %client.url, "error fetching accounts from peer: {err:#}"); - continue; - } - }; + .post::(&format!("catchup/{height}/{}/accounts", view.u64())) + .body_binary(&accounts.to_vec())? + .send() + .await?; // Verify proofs. 
for account in accounts { - let Some((proof, _)) = FeeAccountProof::prove(&snapshot, (*account).into()) else { - tracing::warn!(peer = %client.url, "response from peer missing account {account}"); - continue; - }; - if let Err(err) = proof.verify(&fee_merkle_tree_root) { - tracing::warn!(peer = %client.url, "peer gave invalid proof for account {account}: {err:#}"); - continue; - } + let (proof, _) = FeeAccountProof::prove(&snapshot, (*account).into()) + .context(format!("response missing account {account}"))?; + proof + .verify(&fee_merkle_tree_root) + .context(format!("invalid proof for accoujnt {account}"))?; } - return Ok(snapshot); - } - bail!("Could not fetch account from any peer"); + anyhow::Ok(snapshot) + }) + .await } #[tracing::instrument(skip(self, _instance, mt))] async fn try_remember_blocks_merkle_tree( &self, + retry: usize, _instance: &NodeState, height: u64, view: ViewNumber, mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { - for client in self.clients.iter() { - tracing::debug!(peer = %client.url, "fetching frontier from peer"); - match client - .get::(&format!("catchup/{height}/{}/blocks", view.u64())) - .send() - .await - { - Ok(frontier) => { - let Some(elem) = frontier.elem() else { - tracing::warn!(peer = %client.url, "Provided frontier is missing leaf element"); - continue; - }; - match mt.remember(mt.num_leaves() - 1, *elem, &frontier) { - Ok(_) => return Ok(()), - Err(err) => { - tracing::warn!(peer = %client.url, "Error verifying block proof: {err:#}"); - continue; - } - } - } - Err(err) => { - tracing::info!(peer = %client.url, "error fetching blocks from peer: {err:#}"); + *mt = self + .fetch(retry, |client| { + let mut mt = mt.clone(); + async move { + let frontier = client + .get::(&format!("catchup/{height}/{}/blocks", view.u64())) + .send() + .await?; + let elem = frontier + .elem() + .context("provided frontier is missing leaf element")?; + mt.remember(mt.num_leaves() - 1, *elem, &frontier) + .context("verifying block proof")?; + anyhow::Ok(mt) } - } - } - bail!("Could not fetch frontier from any peer"); + }) + .await?; + Ok(()) } async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { - for client in self.clients.iter() { - tracing::info!("Fetching chain config from {}", client.url); - match client + self.fetch(retry, |client| async move { + let cf = client .get::(&format!("catchup/chain-config/{}", commitment)) .send() - .await - { - Ok(cf) => { - if cf.commit() == commitment { - return Ok(cf); - } else { - tracing::error!( - "Received chain config with mismatched commitment from {}: expected {}, got {}", - client.url, - commitment, - cf.commit(), - ); - } - } - Err(err) => { - tracing::warn!("Error fetching chain config from peer: {}", err); - } - } - } - bail!("Could not fetch chain config from any peer"); + .await?; + ensure!( + cf.commit() == commitment, + "received chain config with mismatched commitment: expected {commitment}, got {}", + cf.commit() + ); + Ok(cf) + }) + .await } fn backoff(&self) -> &BackoffParams { @@ -358,9 +457,10 @@ where { // TODO: add a test for the account proof validation // issue # 2102 (https://github.com/EspressoSystems/espresso-sequencer/issues/2102) - #[tracing::instrument(skip(self, instance))] + #[tracing::instrument(skip(self, _retry, instance))] async fn try_fetch_accounts( &self, + _retry: usize, instance: &NodeState, block_height: u64, view: ViewNumber, @@ -374,9 +474,10 @@ where .0) } - #[tracing::instrument(skip(self, instance, mt))] + 
#[tracing::instrument(skip(self, _retry, instance, mt))] async fn try_remember_blocks_merkle_tree( &self, + _retry: usize, instance: &NodeState, bh: u64, view: ViewNumber, @@ -401,6 +502,7 @@ where async fn try_fetch_chain_config( &self, + _retry: usize, commitment: Commitment, ) -> anyhow::Result { let cf = self.db.get_chain_config(commitment).await?; @@ -461,6 +563,7 @@ impl NullStateCatchup { impl StateCatchup for NullStateCatchup { async fn try_fetch_accounts( &self, + _retry: usize, _instance: &NodeState, _height: u64, _view: ViewNumber, @@ -472,6 +575,7 @@ impl StateCatchup for NullStateCatchup { async fn try_remember_blocks_merkle_tree( &self, + _retry: usize, _instance: &NodeState, _height: u64, _view: ViewNumber, @@ -482,6 +586,7 @@ impl StateCatchup for NullStateCatchup { async fn try_fetch_chain_config( &self, + _retry: usize, commitment: Commitment, ) -> anyhow::Result { self.chain_configs @@ -498,3 +603,25 @@ impl StateCatchup for NullStateCatchup { "NullStateCatchup".into() } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_peer_priority() { + let good_peer = PeerScore { + requests: 1000, + failures: 2, + }; + let bad_peer = PeerScore { + requests: 10, + failures: 1, + }; + assert!(good_peer > bad_peer); + + let mut peers: PriorityQueue<_, _> = [(0, good_peer), (1, bad_peer)].into_iter().collect(); + assert_eq!(peers.pop(), Some((0, good_peer))); + assert_eq!(peers.pop(), Some((1, bad_peer))); + } +} diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index b4d2a4e5e..acc973152 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -55,7 +55,7 @@ use hotshot_types::{ light_client::{StateKeyPair, StateSignKey}, signature_key::{BLSPrivKey, BLSPubKey}, traits::{ - metrics::Metrics, + metrics::{Metrics, NoMetrics}, network::ConnectedNetwork, node_implementation::{NodeImplementation, NodeType, Versions}, }, @@ -314,8 +314,11 @@ pub async fn init_node( // If we were told to fetch the config from an already-started peer, do so. (None, Some(peers)) => { tracing::info!(?peers, "loading network config from peers"); - let peers = - StatePeers::::from_urls(peers, network_params.catchup_backoff); + let peers = StatePeers::::from_urls( + peers, + network_params.catchup_backoff, + &NoMetrics, + ); let config = peers.fetch_config(validator_config.clone()).await?; tracing::info!( @@ -511,6 +514,7 @@ pub async fn init_node( StatePeers::::from_urls( network_params.state_peers, network_params.catchup_backoff, + metrics, ), ) .await, diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 71a387bbc..bf5e5595f 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -213,6 +213,7 @@ pub mod mock { impl StateCatchup for MockStateCatchup { async fn try_fetch_accounts( &self, + _retry: usize, _instance: &NodeState, _height: u64, view: ViewNumber, @@ -228,6 +229,7 @@ pub mod mock { async fn try_remember_blocks_merkle_tree( &self, + _retry: usize, _instance: &NodeState, _height: u64, view: ViewNumber, @@ -252,6 +254,7 @@ pub mod mock { async fn try_fetch_chain_config( &self, + _retry: usize, _commitment: Commitment, ) -> anyhow::Result { Ok(ChainConfig::default()) diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 6bd58a837..ec4eea666 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -41,6 +41,7 @@ pub trait StateCatchup: Send + Sync { /// Try to fetch the given accounts state, failing without retrying if unable. 
async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -58,10 +59,18 @@ pub trait StateCatchup: Send + Sync { accounts: Vec, ) -> anyhow::Result> { self.backoff() - .retry(self, |provider| { - async { + .retry(self, |provider, retry| { + let accounts = &accounts; + async move { let tree = provider - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, &accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await .map_err(|err| { err.context(format!( @@ -85,6 +94,7 @@ pub trait StateCatchup: Send + Sync { /// Try to fetch and remember the blocks frontier, failing without retrying if unable. async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -100,8 +110,8 @@ pub trait StateCatchup: Send + Sync { mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { self.backoff() - .retry(mt, |mt| { - self.try_remember_blocks_merkle_tree(instance, height, view, mt) + .retry(mt, |mt, retry| { + self.try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .map_err(|err| err.context("fetching frontier")) .boxed() }) @@ -110,6 +120,7 @@ pub trait StateCatchup: Send + Sync { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result; @@ -118,9 +129,9 @@ pub trait StateCatchup: Send + Sync { commitment: Commitment, ) -> anyhow::Result { self.backoff() - .retry(self, |provider| { + .retry(self, |provider, retry| { provider - .try_fetch_chain_config(commitment) + .try_fetch_chain_config(retry, commitment) .map_err(|err| err.context("fetching chain config")) .boxed() }) @@ -135,6 +146,7 @@ pub trait StateCatchup: Send + Sync { impl StateCatchup for Box { async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -142,7 +154,14 @@ impl StateCatchup for Box { accounts: &[FeeAccount], ) -> anyhow::Result { (**self) - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await } @@ -161,13 +180,14 @@ impl StateCatchup for Box { async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { (**self) - .try_remember_blocks_merkle_tree(instance, height, view, mt) + .try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .await } @@ -185,9 +205,10 @@ impl StateCatchup for Box { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { - (**self).try_fetch_chain_config(commitment).await + (**self).try_fetch_chain_config(retry, commitment).await } async fn fetch_chain_config( @@ -210,6 +231,7 @@ impl StateCatchup for Box { impl StateCatchup for Arc { async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -217,7 +239,14 @@ impl StateCatchup for Arc { accounts: &[FeeAccount], ) -> anyhow::Result { (**self) - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await } @@ -236,13 +265,14 @@ impl StateCatchup for Arc { async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, mt: &mut BlockMerkleTree, ) -> anyhow::Result<()> { 
(**self) - .try_remember_blocks_merkle_tree(instance, height, view, mt) + .try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .await } @@ -260,9 +290,10 @@ impl StateCatchup for Arc { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { - (**self).try_fetch_chain_config(commitment).await + (**self).try_fetch_chain_config(retry, commitment).await } async fn fetch_chain_config( @@ -287,6 +318,7 @@ impl StateCatchup for Vec { #[tracing::instrument(skip(self, instance))] async fn try_fetch_accounts( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -295,7 +327,14 @@ impl StateCatchup for Vec { ) -> anyhow::Result { for provider in self { match provider - .try_fetch_accounts(instance, height, view, fee_merkle_tree_root, accounts) + .try_fetch_accounts( + retry, + instance, + height, + view, + fee_merkle_tree_root, + accounts, + ) .await { Ok(tree) => return Ok(tree), @@ -315,6 +354,7 @@ impl StateCatchup for Vec { #[tracing::instrument(skip(self, instance, mt))] async fn try_remember_blocks_merkle_tree( &self, + retry: usize, instance: &NodeState, height: u64, view: ViewNumber, @@ -322,7 +362,7 @@ impl StateCatchup for Vec { ) -> anyhow::Result<()> { for provider in self { match provider - .try_remember_blocks_merkle_tree(instance, height, view, mt) + .try_remember_blocks_merkle_tree(retry, instance, height, view, mt) .await { Ok(()) => return Ok(()), @@ -340,10 +380,11 @@ impl StateCatchup for Vec { async fn try_fetch_chain_config( &self, + retry: usize, commitment: Commitment, ) -> anyhow::Result { for provider in self { - match provider.try_fetch_chain_config(commitment).await { + match provider.try_fetch_chain_config(retry, commitment).await { Ok(cf) => return Ok(cf), Err(err) => { tracing::info!( diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index 3f4cfd275..0c3df9579 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -286,12 +286,12 @@ impl BackoffParams { pub async fn retry( &self, mut state: S, - f: impl for<'a> Fn(&'a mut S) -> BoxFuture<'a, anyhow::Result>, + f: impl for<'a> Fn(&'a mut S, usize) -> BoxFuture<'a, anyhow::Result>, ) -> anyhow::Result { let mut delay = self.base; - loop { - match f(&mut state).await { - Ok(res) => break Ok(res), + for i in 0.. { + match f(&mut state, i).await { + Ok(res) => return Ok(res), Err(err) if self.disable => { return Err(err.context("Retryable operation failed; retries disabled")); } @@ -304,6 +304,7 @@ impl BackoffParams { } } } + unreachable!() } #[must_use] From 685a38067bca2a69f1512d5a5422e5d47c0664ab Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Tue, 10 Dec 2024 15:27:40 +0500 Subject: [PATCH 5/5] Clean up unnecessary files in the data directory. 
 (#2380)
---
 data/chain_config.bin  | Bin 138 -> 0 bytes
 data/chain_config.json |   7 -
 data/fee_info.bin      | Bin 65 -> 0 bytes
 data/fee_info.json     |   4 -
 data/header.bin        | Bin 820 -> 0 bytes
 data/l1_block.bin      | Bin 99 -> 0 bytes
 data/messages.bin      | Bin 7396 -> 0 bytes
 data/messages.json     | 427 -----------------------------------------
 data/ns_table.bin      | Bin 40 -> 0 bytes
 data/payload.bin       | Bin 5260 -> 0 bytes
 data/payload.json      |   6 -
 data/transaction.bin   | Bin 276 -> 0 bytes
 data/tx_index.bin      | Bin 12 -> 0 bytes
 data/tx_index.json     |  14 --
 14 files changed, 458 deletions(-)
 delete mode 100644 data/chain_config.bin
 delete mode 100644 data/chain_config.json
 delete mode 100644 data/fee_info.bin
 delete mode 100644 data/fee_info.json
 delete mode 100644 data/header.bin
 delete mode 100644 data/l1_block.bin
 delete mode 100644 data/messages.bin
 delete mode 100644 data/messages.json
 delete mode 100644 data/ns_table.bin
 delete mode 100644 data/payload.bin
 delete mode 100644 data/payload.json
 delete mode 100644 data/transaction.bin
 delete mode 100644 data/tx_index.bin
 delete mode 100644 data/tx_index.json

diff --git a/data/chain_config.json b/data/chain_config.json
deleted file mode 100644
index 4f0650f18..000000000
--- a/data/chain_config.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "base_fee": "0",
-    "chain_id": "35353",
-    "fee_contract": "0x0000000000000000000000000000000000000000",
-    "fee_recipient": "0x0000000000000000000000000000000000000000",
-    "max_block_size": "10240"
-}

diff --git a/data/tx_index.json b/data/tx_index.json
deleted file mode 100644
index 0c24f617f..000000000
--- a/data/tx_index.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    "ns_index": [
-        2,
-        0,
-        0,
-        0
-    ],
-    "tx_index": [
-        5,
-        0,
-        0,
-        0
-    ]
-}
\ No newline at end of file

[The remaining hunks delete the binary fixtures (chain_config.bin, fee_info.bin, header.bin, l1_block.bin, messages.bin, ns_table.bin, payload.bin, transaction.bin, tx_index.bin) and the large JSON fixtures (fee_info.json, messages.json, payload.json); their GIT binary patch literals and base64 payload contents are omitted.]
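Note on the catchup peer-scoring change above: StatePeers now keeps a PriorityQueue of peer indices keyed by a PeerScore, bumps request/failure counts via change_priority_by after every round, and pops the best-scoring peer first on the next attempt. The sketch below illustrates that idea with a hypothetical Ord implementation based on observed success rate (cross-multiplied to stay in integer arithmetic); the actual PeerScore ordering is not shown in this patch and may differ.

// Sketch only: a success-rate ordering for PeerScore, used with the
// priority_queue crate the patch itself relies on.
use std::cmp::Ordering;

use priority_queue::PriorityQueue;

#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
struct PeerScore {
    requests: usize,
    failures: usize,
}

impl Ord for PeerScore {
    fn cmp(&self, other: &Self) -> Ordering {
        // Higher observed success rate wins; compare failure rates without
        // floating point, then break ties on raw counts so the order is total.
        ((other.failures + 1) * (self.requests + 1))
            .cmp(&((self.failures + 1) * (other.requests + 1)))
            .then(self.requests.cmp(&other.requests))
            .then(other.failures.cmp(&self.failures))
    }
}

impl PartialOrd for PeerScore {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut peers: PriorityQueue<usize, PeerScore> = PriorityQueue::new();
    peers.push(0, PeerScore { requests: 1000, failures: 2 });
    peers.push(1, PeerScore { requests: 10, failures: 1 });

    // Record one more request and one more failure for peer 0, as the new
    // fetch helper does after each round of requests.
    peers.change_priority_by(&0, |score| {
        score.requests += 1;
        score.failures += 1;
    });

    // Peer 0 still has the better success rate, so it is tried first.
    assert_eq!(peers.pop().map(|(id, _)| id), Some(0));
    assert_eq!(peers.pop().map(|(id, _)| id), Some(1));
}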
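Note on the reworked BackoffParams::retry above: the loop now numbers attempts (for i in 0..) and hands the attempt index to the retried closure, which is why a retry: usize parameter is threaded through every StateCatchup::try_* method. Below is a minimal, synchronous analogue of that pattern; the real helper is async, retries indefinitely unless disabled, and uses the configured exponential backoff, and retry_with_attempt, max_attempts, and the peer list here are illustrative only.

// Sketch only: a retry loop that passes the attempt index to the operation,
// letting the caller pick a different peer on each retry.
use std::time::Duration;

fn retry_with_attempt<S, T, E>(
    mut state: S,
    max_attempts: usize,
    base_delay: Duration,
    mut op: impl FnMut(&mut S, usize) -> Result<T, E>,
) -> Result<T, E> {
    let mut delay = base_delay;
    let mut last_err = None;
    for attempt in 0..max_attempts {
        match op(&mut state, attempt) {
            Ok(value) => return Ok(value),
            Err(err) => {
                last_err = Some(err);
                std::thread::sleep(delay);
                // Double the delay after each failure, like an exponential backoff.
                delay = delay.saturating_mul(2);
            }
        }
    }
    Err(last_err.expect("max_attempts must be non-zero"))
}

fn main() {
    let peers = ["peer0", "peer1", "peer2"];
    // Rotate through peers by attempt index; only "peer2" answers successfully.
    let chosen = retry_with_attempt((), peers.len(), Duration::from_millis(10), |_, attempt| {
        let peer = peers[attempt % peers.len()];
        if peer == "peer2" {
            Ok(peer)
        } else {
            Err(format!("{peer} unavailable"))
        }
    })
    .unwrap();
    assert_eq!(chosen, "peer2");
}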