From 7cf96d98fdfe144ddae5bc352cafbe3d00ea37d8 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Fri, 6 Dec 2024 15:24:45 +0100 Subject: [PATCH] cfg-gate the legacy refactor (#269) * cfg-gate the refactor --- .github/workflows/build_nix.yml | 6 + .github/workflows/lint.yml | 5 + .github/workflows/test.yml | 7 +- Cargo.lock | 1 + Cargo.toml | 7 +- crates/legacy/Cargo.toml | 1 + crates/legacy/src/lib.rs | 21 +- crates/legacy/src/old/builder_state.rs | 1474 +++++ crates/legacy/src/old/lib.rs | 88 + crates/legacy/src/old/service.rs | 4953 +++++++++++++++++ crates/legacy/src/old/testing/basic_test.rs | 516 ++ .../src/old/testing/finalization_test.rs | 558 ++ crates/legacy/src/old/testing/mod.rs | 261 + .../src/{ => refactored}/block_size_limits.rs | 0 .../src/{ => refactored}/block_store.rs | 0 crates/legacy/src/refactored/lib.rs | 18 + crates/legacy/src/{ => refactored}/service.rs | 0 .../src/{ => refactored}/testing/basic.rs | 0 .../{ => refactored}/testing/block_size.rs | 0 .../{ => refactored}/testing/finalization.rs | 0 .../{ => refactored}/testing/integration.rs | 0 .../src/{ => refactored}/testing/mod.rs | 0 crates/shared/src/testing/constants.rs | 2 + 23 files changed, 7898 insertions(+), 20 deletions(-) create mode 100644 crates/legacy/src/old/builder_state.rs create mode 100644 crates/legacy/src/old/lib.rs create mode 100644 crates/legacy/src/old/service.rs create mode 100644 crates/legacy/src/old/testing/basic_test.rs create mode 100644 crates/legacy/src/old/testing/finalization_test.rs create mode 100644 crates/legacy/src/old/testing/mod.rs rename crates/legacy/src/{ => refactored}/block_size_limits.rs (100%) rename crates/legacy/src/{ => refactored}/block_store.rs (100%) create mode 100644 crates/legacy/src/refactored/lib.rs rename crates/legacy/src/{ => refactored}/service.rs (100%) rename crates/legacy/src/{ => refactored}/testing/basic.rs (100%) rename crates/legacy/src/{ => refactored}/testing/block_size.rs (100%) rename crates/legacy/src/{ => refactored}/testing/finalization.rs (100%) rename crates/legacy/src/{ => refactored}/testing/integration.rs (100%) rename crates/legacy/src/{ => refactored}/testing/mod.rs (100%) diff --git a/.github/workflows/build_nix.yml b/.github/workflows/build_nix.yml index 7dff7d5c..5a5f6eba 100644 --- a/.github/workflows/build_nix.yml +++ b/.github/workflows/build_nix.yml @@ -11,6 +11,12 @@ on: jobs: nix: + strategy: + matrix: + flags: ["--cfg legacy_builder_refactored", ""] + env: + RUSTFLAGS: "${{ matrix.flags }}" + RUSTDOCFLAGS: "${{ matrix.flags }}" runs-on: ubuntu-latest timeout-minutes: 90 steps: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 92f471d6..1610e0b7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,9 +14,14 @@ concurrency: jobs: lint: + strategy: + matrix: + flags: ["--cfg legacy_builder_refactored", ""] runs-on: ubuntu-latest env: + RUSTFLAGS: "${{ matrix.flags }}" + RUSTDOCFLAGS: "${{ matrix.flags }}" RUST_LOG: info steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e75c1e4f..3dd437d6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,9 +14,14 @@ concurrency: jobs: test: - runs-on: ubuntu-latest + strategy: + matrix: + flags: ["--cfg legacy_builder_refactored", ""] env: + RUSTFLAGS: "${{ matrix.flags }}" + RUSTDOCFLAGS: "${{ matrix.flags }}" RUST_LOG: info + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 diff --git a/Cargo.lock b/Cargo.lock index c264eb91..e03c32d7 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3100,6 +3100,7 @@ dependencies = [ "hotshot-task-impls", "hotshot-testing", "hotshot-types", + "lru 0.12.5", "marketplace-builder-shared", "num_cpus", "once_cell", diff --git a/Cargo.toml b/Cargo.toml index 1a0a619d..2ff5675e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ either = "1.13" futures = "0.3" jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfish", tag = "0.4.5" } hex = "0.4.3" +lru = "0.12" multimap = "0.10.0" nonempty-collections = "0.2" once_cell = "1.20" @@ -56,13 +57,15 @@ version = "0.1.56" edition = "2021" [workspace.lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage_nightly)'] } +unexpected_cfgs = { level = "warn", check-cfg = [ + 'cfg(legacy_builder_refactored, values(none()))', + 'cfg(coverage_nightly)', +] } [workspace.lints.clippy] disallowed-names = "deny" doc_markdown = "deny" doc_link_with_quotes = "deny" -clone_on_ref_ptr = "deny" [workspace.lints.rustdoc] broken_intra_doc_links = "deny" diff --git a/crates/legacy/Cargo.toml b/crates/legacy/Cargo.toml index a11ec3e5..6a2228c5 100644 --- a/crates/legacy/Cargo.toml +++ b/crates/legacy/Cargo.toml @@ -20,6 +20,7 @@ futures = { workspace = true } hotshot = { workspace = true } hotshot-builder-api = { workspace = true } hotshot-types = { workspace = true } +lru = { workspace = true } once_cell = { workspace = true } serde = { workspace = true, features = ["derive"] } sha2 = { workspace = true } diff --git a/crates/legacy/src/lib.rs b/crates/legacy/src/lib.rs index 67d926af..42842991 100644 --- a/crates/legacy/src/lib.rs +++ b/crates/legacy/src/lib.rs @@ -1,18 +1,5 @@ -//! Builder Phase 1 -//! It mainly provides three API services to hotshot proposers: -//! 1. Serves a proposer(leader)'s request to provide blocks information -//! 2. Serves a proposer(leader)'s request to provide the full blocks information -//! 3. Serves a proposer(leader)'s request to provide the block header information -//! -//! It also provides one API service to external users: -//! 1. 
Serves a user's request to submit a private transaction -#![cfg_attr(coverage_nightly, feature(coverage_attribute))] +#[cfg_attr(legacy_builder_refactored, path = "refactored/lib.rs")] +#[cfg_attr(not(legacy_builder_refactored), path = "old/lib.rs")] +mod implementation; -pub mod block_size_limits; -pub mod block_store; -pub mod service; - -// tracking the testing -#[cfg(test)] -#[cfg_attr(coverage_nightly, coverage(off))] -pub mod testing; +pub use implementation::*; diff --git a/crates/legacy/src/old/builder_state.rs b/crates/legacy/src/old/builder_state.rs new file mode 100644 index 00000000..dc36fe96 --- /dev/null +++ b/crates/legacy/src/old/builder_state.rs @@ -0,0 +1,1474 @@ +use hotshot_types::{ + data::{DaProposal, Leaf, QuorumProposal}, + message::Proposal, + traits::block_contents::{BlockHeader, BlockPayload}, + traits::{ + block_contents::precompute_vid_commitment, + node_implementation::{ConsensusTime, NodeType}, + EncodeBytes, + }, + utils::BuilderCommitment, + vid::{VidCommitment, VidPrecomputeData}, +}; +use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; + +use committable::Commitment; + +use crate::implementation::LegacyCommit; +use crate::service::{GlobalState, ReceivedTransaction}; +use async_broadcast::broadcast; +use async_broadcast::Receiver as BroadcastReceiver; +use async_broadcast::Sender as BroadcastSender; +use async_lock::RwLock; +use core::panic; +use futures::StreamExt; + +use tokio::{ + spawn, + sync::{ + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot, + }, + task::spawn_blocking, + time::sleep, +}; + +use std::cmp::PartialEq; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::fmt::Debug; +use std::sync::Arc; +use std::time::Instant; +use std::{collections::hash_map::Entry, time::Duration}; + +pub type TxTimeStamp = u128; + +/// Enum to hold the different sources of the transaction +#[derive(Clone, Debug, PartialEq)] +pub enum TransactionSource { + External, // txn from the external source i.e private mempool + HotShot, // txn from the HotShot network i.e public mempool +} + +/// Decide Message to be put on the decide channel +#[derive(Clone, Debug)] +pub struct DecideMessage { + pub latest_decide_view_number: Types::View, +} +/// DA Proposal Message to be put on the da proposal channel +#[derive(Clone, Debug, PartialEq)] +pub struct DaProposalMessage { + pub proposal: Arc>>, + pub sender: Types::SignatureKey, +} +/// Quorum proposal message to be put on the quorum proposal channel +#[derive(Clone, Debug, PartialEq)] +pub struct QuorumProposalMessage { + pub proposal: Arc>>, + pub sender: Types::SignatureKey, +} +/// Request Message to be put on the request channel +#[derive(Clone, Debug)] +pub struct RequestMessage { + pub state_id: BuilderStateId, + pub response_channel: UnboundedSender, +} + +#[derive(Debug)] +pub enum TriggerStatus { + Start, + Exit, +} + +/// Response Message to be put on the response channel +#[derive(Debug)] +pub struct BuildBlockInfo { + pub id: BlockId, + pub block_size: u64, + pub offered_fee: u64, + pub block_payload: Types::BlockPayload, + pub metadata: <::BlockPayload as BlockPayload>::Metadata, + pub vid_trigger: oneshot::Sender, + pub vid_receiver: UnboundedReceiver<(VidCommitment, VidPrecomputeData)>, + // Could we have included more transactions, but chose not to? 
+ pub truncated: bool, +} + +/// Response Message to be put on the response channel +#[derive(Debug, Clone)] +pub struct ResponseMessage { + pub builder_hash: BuilderCommitment, + pub block_size: u64, + pub offered_fee: u64, +} +#[derive(Debug, Clone)] +/// Enum to hold the status out of the decide event +pub enum Status { + ShouldExit, + ShouldContinue, +} + +#[derive(Debug, Clone, PartialEq)] +pub struct DAProposalInfo { + pub view_number: Types::View, + pub proposal: Arc>>, +} + +/// [`ALLOW_EMPTY_BLOCK_PERIOD`] is a constant that is used to determine the +/// number of future views that we will allow building empty blocks for. +/// +/// This value governs the ability for the Builder to prioritize finalizing +/// transactions by producing empty blocks rather than avoiding the creation +/// of them, following the proposal that contains transactions. +pub(crate) const ALLOW_EMPTY_BLOCK_PERIOD: u64 = 3; + +#[derive(Debug)] +pub struct BuilderState { + /// Recent included txs set while building blocks + pub included_txns: HashSet>, + + /// Old txs to be garbage collected + pub included_txns_old: HashSet>, + + /// Expiring txs to be garbage collected + pub included_txns_expiring: HashSet>, + + /// txns currently in the `tx_queue` + pub txns_in_queue: HashSet>, + + /// filtered queue of available transactions, taken from `tx_receiver` + pub tx_queue: VecDeque>>, + + /// `da_proposal_payload_commit` to (`da_proposal`, `node_count`) + #[allow(clippy::type_complexity)] + pub da_proposal_payload_commit_to_da_proposal: + HashMap<(BuilderCommitment, Types::View), DAProposalInfo>, + + /// `quorum_proposal_payload_commit` to `quorum_proposal` + #[allow(clippy::type_complexity)] + pub quorum_proposal_payload_commit_to_quorum_proposal: + HashMap<(BuilderCommitment, Types::View), Arc>>>, + + /// Spawned-from references to the parent block. + pub parent_block_references: ParentBlockReferences, + + // Channel Receivers for the HotShot events, Tx_receiver could also receive the external transactions + /// decide receiver + pub decide_receiver: BroadcastReceiver>, + + /// da proposal receiver + pub da_proposal_receiver: BroadcastReceiver>, + + /// quorum proposal receiver + pub quorum_proposal_receiver: BroadcastReceiver>, + + /// channel receiver for the block requests + pub req_receiver: BroadcastReceiver>, + + /// incoming stream of transactions + pub tx_receiver: BroadcastReceiver>>, + + /// global state handle, defined in the service.rs + pub global_state: Arc>>, + + /// locally spawned builder Commitements + pub builder_commitments: HashSet<(BuilderStateId, BuilderCommitment)>, + + /// timeout for maximising the txns in the block + pub maximize_txn_capture_timeout: Duration, + + /// constant fee that the builder will offer per byte of data sequenced + pub base_fee: u64, + + /// validated state that is required for a proposal to be considered valid. Needed for the + /// purposes of building a valid block payload within the sequencer. + pub validated_state: Arc, + + /// instance state to enfoce `max_block_size` + pub instance_state: Arc, + + /// txn garbage collection every duration time + pub txn_garbage_collect_duration: Duration, + + /// time of next garbage collection for txns + pub next_txn_garbage_collect_time: Instant, + + /// `allow_empty_block_until` is a variable that dictates the time until which + /// a builder should stop producing empty blocks. This is done specifically + /// to allow for faster finalization of previous blocks that have had + /// transactions included in them. 
+ pub allow_empty_block_until: Option, +} + +/// [`best_builder_states_to_extend`] is a utility function that is used to +/// in order to determine which [`BuilderState`]s are the best fit to extend +/// from. +/// +/// This function is designed to inspect the current state of the global state +/// in order to determine which [`BuilderState`]s are the best fit to extend +/// from. We only want to use information from [`GlobalState`] as otherwise +/// we would have some insider knowledge unique to our specific [`BuilderState`] +/// rather than knowledge that is available to all [`BuilderState`]s. In fact, +/// in order to ensure this, this function lives outside of the [`BuilderState`] +/// itself. +/// +/// In an ideal circumstance the best [`BuilderState`] to extend from is going to +/// be the one that is immediately preceding the [`QuorumProposal`] that we are +/// attempting to extend from. However, if all we know is the view number of +/// the [`QuorumProposal`] that we are attempting to extend from, then we may end +/// up in a scenario where we have multiple [`BuilderState`]s that are all equally +/// valid to extend from. When this happens, we have the potential for a data +/// race. +/// +/// The primary cause of this has to due with the interface of the +/// [`ProxyGlobalState`](crate::service::ProxyGlobalState)'s API. In general, +/// we want to be able to retrieve a [`BuilderState`] via the [`BuilderStateId`]. +/// The [`BuilderStateId`] only references a [`ViewNumber`](hotshot_types::data::ViewNumber) +/// and a [`VidCommitment`] While this information is available in the [`QuorumProposal`], +/// it only helps us to rule out [`BuilderState`]s that already exist. +/// It does **NOT** help us to pick a [`BuilderState`] that is the best fit to extend from. +/// +/// This is where the `justify_qc` comes in to consideration. The `justify_qc` +/// contains the previous [`ViewNumber`](hotshot_types::data::ViewNumber) that is +/// being extended from, and in addition it also contains the previous [`Commitment>`] +/// that is being built on top of. Since our [`BuilderState`]s store identifying +/// information that contains this same `leaf_commit` we can compare these +/// directly to ensure that we are extending from the correct [`BuilderState`]. +/// +/// This function determines the best [`BuilderState`] in the following steps: +/// +/// 1. If we have a [`BuilderState`] that is already spawned for the current +/// [`QuorumProposal`], then we should should return no states, as one already +/// exists. This will prevent us from attempting to spawn duplicate +/// [`BuilderState`]s. +/// 2. Attempt to find all [`BuilderState`]s that are recorded within +/// [`GlobalState`] that have matching view number and leaf commitments. There +/// *should* only be one of these. But all would be valid extension points. +/// 3. If we can't find any [`BuilderState`]s that match the view number +/// and leaf commitment, then we should return for the maximum stored view +/// number that is smaller than the current [`QuorumProposal`]. +/// 4. If there is is only one [`BuilderState`] stored in the [`GlobalState`], then +/// we should return that [`BuilderState`] as the best fit. +/// 5. If none of the other criteria match, we return an empty result as it is +/// unclear what to do in this case. +/// +/// > Note: Any time this function returns more than a single entry in its +/// > [HashSet] result, there is a potential for a race condition. 
This is +/// > because there are multiple [BuilderState]s that are equally valid to +/// > extend from. This race could be avoided by just picking one of the +/// > entries in the resulting [HashSet], but this is not done here in order +/// > to allow us to highlight the possibility of the race. +async fn best_builder_states_to_extend( + quorum_proposal: Arc>>, + global_state: Arc>>, +) -> HashSet> { + let current_view_number = quorum_proposal.data.view_number; + let current_commitment = quorum_proposal.data.block_header.payload_commitment(); + let current_builder_state_id = BuilderStateId:: { + parent_commitment: current_commitment, + parent_view: current_view_number, + }; + + let global_state_read_lock = global_state.read_arc().await; + + // The first step is to check if we already have a spawned [BuilderState]. + // If we do, then we should indicate that there is no best fit, as we + // don't want to spawn another [BuilderState]. + if global_state_read_lock + .spawned_builder_states + .contains_key(¤t_builder_state_id) + { + // We already have a spawned [BuilderState] for this proposal. + // So we should just ignore it. + return HashSet::new(); + } + + // Next we want to see if there is an immediate match for a [BuilderState] + // that we can extend from. This is the most ideal situation, as it + // implies that we are extending from the correct [BuilderState]. + // We do this by checking the `justify_qc` stored within the + // [QuorumProposal], and checking it against the current spawned + // [BuilderState]s + let justify_qc = &quorum_proposal.data.justify_qc; + let existing_states: HashSet<_> = global_state_read_lock + .spawned_builder_states + .iter() + .filter( + |(_, (parent_block_references, _))| match parent_block_references { + None => false, + Some(parent_block_references) => { + parent_block_references.leaf_commit == justify_qc.data.leaf_commit + && parent_block_references.view_number == justify_qc.view_number + } + }, + ) + .map(|(builder_state_id, _)| builder_state_id.clone()) + .collect(); + + // If we found any matching [BuilderState]s, then we should return them + // as the best fit. + if !existing_states.is_empty() { + return existing_states; + } + + // At this point, we don't have any "ideal" matches or scenarios. So we + // need to look for a suitable fall-back. The best fallback condition to + // start with is any [BuilderState] that has the maximum spawned view + // number whose value is smaller than the current [QuorumProposal]. + let maximum_stored_view_number_smaller_than_quorum_proposal = global_state_read_lock + .spawned_builder_states + .keys() + .map(|builder_state_id| *builder_state_id.parent_view) + .filter(|view_number| view_number < ¤t_view_number) + .max(); + + // If we have a maximum view number that meets our criteria, then we should + // return all [BuilderStateId]s that match this view number. + // This can lead to multiple [BuilderStateId]s being returned. + if let Some(maximum_stored_view_number_smaller_than_quorum_proposal) = + maximum_stored_view_number_smaller_than_quorum_proposal + { + // If we are the maximum stored view number smaller than the quorum + // proposal's view number, then we are the best fit. 
+ let mut result = HashSet::new(); + for builder_state_id in + global_state_read_lock + .spawned_builder_states + .keys() + .filter(|builder_state_id| { + builder_state_id.parent_view.u64() + == maximum_stored_view_number_smaller_than_quorum_proposal + }) + { + result.insert(builder_state_id.clone()); + } + return result; + } + + // This is our last ditch effort to continue making progress. If there is + // only one [BuilderState] active, then we should return that as the best + // fit, as it will be the only way we can continue making progress with + // the builder. + if global_state_read_lock.spawned_builder_states.len() == 1 { + let mut result = HashSet::new(); + for builder_state_id in global_state_read_lock.spawned_builder_states.keys() { + result.insert(builder_state_id.clone()); + } + return result; + } + + // This implies that there are only larger [BuilderState]s active than + // the one we are. This is weird, it implies that some sort of time + // travel has occurred view-wise. It is unclear what to do in this + // situation. + + HashSet::new() +} + +impl BuilderState { + /// Utility method that attempts to determine whether + /// we are among the best [`BuilderState`]s to extend from. + async fn am_i_the_best_builder_state_to_extend( + &self, + quorum_proposal: Arc>>, + ) -> bool { + let best_builder_states_to_extend = + best_builder_states_to_extend(quorum_proposal.clone(), self.global_state.clone()).await; + + tracing::debug!( + "{}@{} thinks these are the best builder states to extend from: {:?} for proposal {}@{}", + self.parent_block_references.vid_commitment, + self.parent_block_references.view_number.u64(), + best_builder_states_to_extend + .iter() + .map(|builder_state_id| format!( + "{}@{}", + builder_state_id.parent_commitment, + builder_state_id.parent_view.u64() + )) + .collect::>(), + quorum_proposal.data.block_header.payload_commitment(), + quorum_proposal.data.view_number.u64(), + ); + + // We are a best fit if we are contained within the returned set of + // best [BuilderState]s to extend from. + best_builder_states_to_extend.contains(&BuilderStateId { + parent_commitment: self.parent_block_references.vid_commitment, + parent_view: self.parent_block_references.view_number, + }) + } + + /// processing the DA proposal + #[tracing::instrument(skip_all, name = "process da proposal", + fields(builder_parent_block_references = %self.parent_block_references))] + async fn process_da_proposal(&mut self, da_msg: DaProposalMessage) { + tracing::debug!( + "Builder Received DA message for view {:?}", + da_msg.proposal.data.view_number + ); + + // we do not have the option to ignore DA proposals if we want to be able to handle failed view reorgs. 
+ + // If the respective builder state exists to handle the request + let proposal = da_msg.proposal.clone(); + + // get the view number and encoded txns from the da_proposal_data + let view_number = proposal.data.view_number; + let encoded_txns = &proposal.data.encoded_transactions; + + let metadata = &proposal.data.metadata; + + // form a block payload from the encoded transactions + let block_payload = + >::from_bytes(encoded_txns, metadata); + // get the builder commitment from the block payload + let payload_builder_commitment = block_payload.builder_commitment(metadata); + + tracing::debug!( + "Extracted builder commitment from the da proposal: {:?}", + payload_builder_commitment + ); + + // form the DA proposal info + let da_proposal_info = DAProposalInfo { + view_number, + proposal, + }; + + let std::collections::hash_map::Entry::Vacant(e) = self + .da_proposal_payload_commit_to_da_proposal + .entry((payload_builder_commitment.clone(), view_number)) + else { + tracing::debug!("Payload commitment already exists in the da_proposal_payload_commit_to_da_proposal hashmap, so ignoring it"); + return; + }; + + // if we have matching da and quorum proposals, we can skip storing the one, and remove + // the other from storage, and call build_block with both, to save a little space. + + let Entry::Occupied(quorum_proposal) = self + .quorum_proposal_payload_commit_to_quorum_proposal + .entry((payload_builder_commitment.clone(), view_number)) + else { + e.insert(da_proposal_info); + return; + }; + + let quorum_proposal = quorum_proposal.remove(); + + // if we have a matching quorum proposal + // if (this is the correct parent or + // (the correct parent is missing and this is the highest view)) + // spawn a clone + if quorum_proposal.data.view_number != view_number { + tracing::debug!("Not spawning a clone despite matching DA and quorum payload commitments, as they corresponds to different view numbers"); + return; + } + + tracing::info!( + "Spawning a clone from process DA proposal for view number: {:?}", + view_number + ); + // remove this entry from quorum_proposal_payload_commit_to_quorum_proposal + self.quorum_proposal_payload_commit_to_quorum_proposal + .remove(&(payload_builder_commitment.clone(), view_number)); + self.spawn_clone_that_extends_self(da_proposal_info, quorum_proposal.clone()) + .await; + } + + /// processing the quorum proposal + //#[tracing::instrument(skip_all, name = "Process Quorum Proposal")] + #[tracing::instrument(skip_all, name = "process quorum proposal", + fields(builder_parent_block_references = %self.parent_block_references))] + async fn process_quorum_proposal(&mut self, quorum_msg: QuorumProposalMessage) { + tracing::debug!( + "Builder Received Quorum proposal message for view {:?}", + quorum_msg.proposal.data.view_number + ); + + // Two cases to handle: + // Case 1: Bootstrapping phase + // Case 2: No intended builder state exist + // To handle both cases, we can have the highest view number builder state running + // and only doing the insertion if and only if intended builder state for a particulat view is not present + // check the presence of quorum_proposal.data.view_number-1 in the spawned_builder_states list + let quorum_proposal = &quorum_msg.proposal; + let view_number = quorum_proposal.data.view_number; + let payload_builder_commitment = quorum_proposal.data.block_header.builder_commitment(); + + tracing::debug!( + "Extracted payload builder commitment from the quorum proposal: {:?}", + payload_builder_commitment + ); + + let 
std::collections::hash_map::Entry::Vacant(e) = self + .quorum_proposal_payload_commit_to_quorum_proposal + .entry((payload_builder_commitment.clone(), view_number)) + else { + tracing::debug!("Payload commitment already exists in the quorum_proposal_payload_commit_to_quorum_proposal hashmap, so ignoring it"); + return; + }; + + // first check whether vid_commitment exists in the quorum_proposal_payload_commit_to_quorum_proposal hashmap, if yer, ignore it, otherwise validate it and later insert in + // if we have matching da and quorum proposals, we can skip storing the one, and remove the other from storage, and call build_block with both, to save a little space. + let Entry::Occupied(da_proposal) = self + .da_proposal_payload_commit_to_da_proposal + .entry((payload_builder_commitment.clone(), view_number)) + else { + e.insert(quorum_proposal.clone()); + return; + }; + + let da_proposal_info = da_proposal.remove(); + // remove the entry from the da_proposal_payload_commit_to_da_proposal hashmap + self.da_proposal_payload_commit_to_da_proposal + .remove(&(payload_builder_commitment.clone(), view_number)); + + // also make sure we clone for the same view number( check incase payload commitments are same) + if da_proposal_info.view_number != view_number { + tracing::debug!("Not spawning a clone despite matching DA and quorum payload commitments, as they corresponds to different view numbers"); + return; + } + + tracing::info!( + "Spawning a clone from process quorum proposal for view number: {:?}", + view_number + ); + + self.spawn_clone_that_extends_self(da_proposal_info, quorum_proposal.clone()) + .await; + } + + /// A helper function that is used by both [`BuilderState::process_da_proposal`] + /// and [`BuilderState::process_quorum_proposal`] to spawn a new [`BuilderState`] + /// that extends from the current [`BuilderState`]. + /// + /// This helper function also adds additional checks in order to ensure + /// that the [`BuilderState`] that is being spawned is the best fit for the + /// [`QuorumProposal`] that is being extended from. 
+ async fn spawn_clone_that_extends_self( + &mut self, + da_proposal_info: DAProposalInfo, + quorum_proposal: Arc>>, + ) { + if !self + .am_i_the_best_builder_state_to_extend(quorum_proposal.clone()) + .await + { + tracing::debug!( + "{} is not the best fit for forking, {}@{}, so ignoring the quorum proposal, and leaving it to another BuilderState", + self.parent_block_references, + quorum_proposal.data.block_header.payload_commitment(), + quorum_proposal.data.view_number.u64(), + ); + return; + } + + let (req_sender, req_receiver) = broadcast(self.req_receiver.capacity()); + + tracing::debug!( + "extending BuilderState with a clone from {} with new proposal {}@{}", + self.parent_block_references, + quorum_proposal.data.block_header.payload_commitment(), + quorum_proposal.data.view_number.u64() + ); + // We literally fork ourselves + self.clone_with_receiver(req_receiver) + .spawn_clone(da_proposal_info, quorum_proposal.clone(), req_sender) + .await; + } + + /// processing the decide event + #[tracing::instrument(skip_all, name = "process decide event", + fields(builder_parent_block_references = %self.parent_block_references))] + async fn process_decide_event(&mut self, decide_msg: DecideMessage) -> Option { + // Exit out all the builder states if their parent_block_references.view_number is less than the latest_decide_view_number + // The only exception is that we want to keep the highest view number builder state active to ensure that + // we have a builder state to handle the incoming DA and quorum proposals + let decide_view_number = decide_msg.latest_decide_view_number; + + let retained_view_cutoff = self + .global_state + .write_arc() + .await + .remove_handles(decide_view_number); + if self.parent_block_references.view_number < retained_view_cutoff { + tracing::info!( + "Decide@{:?}; Task@{:?} exiting; views < {:?} being reclaimed", + decide_view_number.u64(), + self.parent_block_references.view_number.u64(), + retained_view_cutoff.u64(), + ); + return Some(Status::ShouldExit); + } + tracing::info!( + "Decide@{:?}; Task@{:?} not exiting; views >= {:?} being retained", + decide_view_number.u64(), + self.parent_block_references.view_number.u64(), + retained_view_cutoff.u64(), + ); + + Some(Status::ShouldContinue) + } + + // spawn a clone of the builder state + #[tracing::instrument(skip_all, name = "spawn_clone", + fields(builder_parent_block_references = %self.parent_block_references))] + async fn spawn_clone( + mut self, + da_proposal_info: DAProposalInfo, + quorum_proposal: Arc>>, + req_sender: BroadcastSender>, + ) { + let leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); + + // We replace our parent_block_references with information from the + // quorum proposal. This is identifying the block that this specific + // instance of [BuilderState] is attempting to build for. + self.parent_block_references = ParentBlockReferences { + view_number: quorum_proposal.data.view_number, + vid_commitment: quorum_proposal.data.block_header.payload_commitment(), + leaf_commit: leaf.legacy_commit(), + builder_commitment: quorum_proposal.data.block_header.builder_commitment(), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }; + + let builder_state_id = BuilderStateId { + parent_commitment: self.parent_block_references.vid_commitment, + parent_view: self.parent_block_references.view_number, + }; + + { + // Let's ensure that we don't already have one of these BuilderStates + // running already. 
+ + let global_state_read_lock = self.global_state.read_arc().await; + if global_state_read_lock + .spawned_builder_states + .contains_key(&builder_state_id) + { + tracing::warn!( + "Aborting spawn_clone, builder state already exists in spawned_builder_states: {:?}", + builder_state_id + ); + return; + } + } + + let encoded_txns = &da_proposal_info.proposal.data.encoded_transactions; + let metadata = &da_proposal_info.proposal.data.metadata; + + let block_payload = + >::from_bytes(encoded_txns, metadata); + let txn_commitments = block_payload.transaction_commitments(metadata); + + for tx in txn_commitments.iter() { + self.txns_in_queue.remove(tx); + } + + self.included_txns.extend(txn_commitments.iter()); + self.tx_queue + .retain(|tx| self.txns_in_queue.contains(&tx.commit)); + + if !txn_commitments.is_empty() { + self.allow_empty_block_until = Some(Types::View::new( + da_proposal_info.view_number.u64() + ALLOW_EMPTY_BLOCK_PERIOD, + )); + } + + // register the spawned builder state to spawned_builder_states in the global state + self.global_state.write_arc().await.register_builder_state( + BuilderStateId { + parent_commitment: self.parent_block_references.vid_commitment, + parent_view: self.parent_block_references.view_number, + }, + self.parent_block_references.clone(), + req_sender, + ); + + self.event_loop(); + } + + // build a block + #[tracing::instrument(skip_all, name = "build block", + fields(builder_parent_block_references = %self.parent_block_references))] + async fn build_block( + &mut self, + state_id: BuilderStateId, + ) -> Option> { + let timeout_after = Instant::now() + self.maximize_txn_capture_timeout; + let sleep_interval = self.maximize_txn_capture_timeout / 10; + while Instant::now() <= timeout_after { + self.collect_txns(timeout_after).await; + + if !self.tx_queue.is_empty() // we have transactions + || Instant::now() + sleep_interval > timeout_after + // we don't have time for another iteration + { + break; + } + + sleep(sleep_interval).await + } + + // should_prioritize_finalization is a flag that is used to determine + // whether we should return empty blocks or not. + + let should_prioritize_finalization = self + .allow_empty_block_until + .map(|until| state_id.parent_view < until) + .unwrap_or(false); + + if self.tx_queue.is_empty() && !should_prioritize_finalization { + // Don't build an empty block + return None; + } + + let max_block_size = self + .global_state + .read_arc() + .await + .block_size_limits + .max_block_size; + let transactions_to_include = self.tx_queue.iter().scan(0, |total_size, tx| { + let prev_size = *total_size; + *total_size += tx.len; + // We will include one transaction over our target block length + // if it's the first transaction in queue, otherwise we'd have a possible failure + // state where a single transaction larger than target block state is stuck in + // queue and we just build empty blocks forever + if *total_size >= max_block_size && prev_size != 0 { + None + } else { + Some(tx.tx.clone()) + } + }); + + let Ok((payload, metadata)) = + >::from_transactions( + transactions_to_include, + &self.validated_state, + &self.instance_state, + ) + .await + else { + tracing::warn!("build block, returning None"); + return None; + }; + + let builder_hash = payload.builder_commitment(&metadata); + // count the number of txns + let actual_txn_count = payload.num_transactions(&metadata); + + // Payload is empty despite us checking that tx_queue isn't empty earlier. 
+ // + // This means that the block was truncated due to *sequencer* block length + // limits, which are different from our `max_block_size`. There's no good way + // for us to check for this in advance, so we detect transactions too big for + // the sequencer indirectly, by observing that we passed some transactions + // to `>::from_transactions`, but + // it returned an empty block. + // Thus we deduce that the first transaction in our queue is too big to *ever* + // be included, because it alone goes over sequencer's block size limit. + // We need to drop it and mark as "included" so that if we receive + // it again we don't even bother with it. + if actual_txn_count == 0 && !should_prioritize_finalization { + if let Some(txn) = self.tx_queue.pop_front() { + self.txns_in_queue.remove(&txn.commit); + self.included_txns.insert(txn.commit); + }; + return None; + } + + // insert the recently built block into the builder commitments + self.builder_commitments + .insert((state_id, builder_hash.clone())); + + let encoded_txns: Vec = payload.encode().to_vec(); + let block_size: u64 = encoded_txns.len() as u64; + let offered_fee: u64 = self.base_fee * block_size; + + // Get the number of nodes stored while processing the `claim_block_with_num_nodes` request + // or upon initialization. + let num_nodes = self.global_state.read_arc().await.num_nodes; + + let (trigger_send, trigger_recv) = oneshot::channel(); + + // spawn a task to calculate the VID commitment, and pass the handle to the global state + // later global state can await on it before replying to the proposer + let (unbounded_sender, unbounded_receiver) = unbounded_channel(); + #[allow(unused_must_use)] + spawn(async move { + let Ok(TriggerStatus::Start) = trigger_recv.await else { + return; + }; + + let join_handle = + spawn_blocking(move || precompute_vid_commitment(&encoded_txns, num_nodes)); + + let (vidc, pre_compute_data) = join_handle.await.unwrap(); + + unbounded_sender.send((vidc, pre_compute_data)); + }); + + tracing::info!( + "Builder view num {:?}, building block with {:?} txns, with builder hash {:?}", + self.parent_block_references.view_number, + actual_txn_count, + builder_hash + ); + + Some(BuildBlockInfo { + id: BlockId { + view: self.parent_block_references.view_number, + hash: builder_hash, + }, + block_size, + offered_fee, + block_payload: payload, + metadata, + vid_trigger: trigger_send, + vid_receiver: unbounded_receiver, + truncated: actual_txn_count < self.tx_queue.len(), + }) + } + + async fn process_block_request(&mut self, req: RequestMessage) { + // If a spawned clone is active then it will handle the request, otherwise the highest view num builder will handle + if req.state_id.parent_commitment != self.parent_block_references.vid_commitment + || req.state_id.parent_view != self.parent_block_references.view_number + { + tracing::debug!( + "Builder {:?} Requested Builder commitment does not match the built_from_view, so ignoring it", + self.parent_block_references.view_number + ); + return; + } + + let highest_view_num_builder_id = self + .global_state + .read_arc() + .await + .highest_view_num_builder_id + .clone(); + + if highest_view_num_builder_id.parent_view != self.parent_block_references.view_number { + tracing::debug!( + "Builder {:?} Requested Builder commitment does not match the highest_view_num_builder_id, so ignoring it", + self.parent_block_references.view_number + ); + return; + } + + tracing::info!( + "Request for parent {} handled by builder with view {:?}", + req.state_id, + 
self.parent_block_references.view_number, + ); + let response = self.build_block(req.state_id.clone()).await; + + let Some(response) = response else { + tracing::debug!("No response to send"); + return; + }; + + // form the response message + let response_msg = ResponseMessage { + builder_hash: response.id.hash.clone(), + block_size: response.block_size, + offered_fee: response.offered_fee, + }; + + let builder_hash = response.id.hash.clone(); + self.global_state.write_arc().await.update_global_state( + req.state_id.clone(), + response, + response_msg.clone(), + ); + + // ... and finally, send the response + if let Err(e) = req.response_channel.send(response_msg) { + tracing::warn!( + "Builder {:?} failed to send response to {:?} with builder hash {:?}, Err: {:?}", + self.parent_block_references.view_number, + req, + builder_hash, + e + ); + return; + } + + tracing::info!( + "Builder {:?} Sent response to the request{:?} with builder hash {:?}", + self.parent_block_references.view_number, + req, + builder_hash + ); + } + + #[tracing::instrument(skip_all, name = "event loop", + fields(builder_parent_block_references = %self.parent_block_references))] + pub fn event_loop(mut self) { + let _builder_handle = spawn(async move { + loop { + tracing::debug!( + "Builder {:?} event loop", + self.parent_block_references.view_number + ); + futures::select! { + req = self.req_receiver.next() => { + tracing::debug!("Received request msg in builder {:?}: {:?}", self.parent_block_references.view_number, req); + match req { + Some(req) => { + if let MessageType::RequestMessage(req) = req { + tracing::debug!( + "Received request msg in builder {:?}: {:?}", + self.parent_block_references.view_number, + req + ); + self.process_block_request(req).await; + } else { + tracing::warn!("Unexpected message on requests channel: {:?}", req); + } + } + None => { + tracing::warn!("No more request messages to consume"); + } + } + }, + da = self.da_proposal_receiver.next() => { + match da { + Some(da) => { + if let MessageType::DaProposalMessage(rda_msg) = da { + tracing::debug!("Received da proposal msg in builder {:?}:\n {:?}", self.parent_block_references, rda_msg.proposal.data.view_number); + self.process_da_proposal(rda_msg).await; + } else { + tracing::warn!("Unexpected message on da proposals channel: {:?}", da); + } + } + None => { + tracing::warn!("No more da proposal messages to consume"); + } + } + }, + quorum = self.quorum_proposal_receiver.next() => { + match quorum { + Some(quorum) => { + if let MessageType::QuorumProposalMessage(rquorum_msg) = quorum { + tracing::debug!("Received quorum proposal msg in builder {:?}:\n {:?} for view ", self.parent_block_references, rquorum_msg.proposal.data.view_number); + self.process_quorum_proposal(rquorum_msg).await; + } else { + tracing::warn!("Unexpected message on quorum proposals channel: {:?}", quorum); + } + } + None => { + tracing::warn!("No more quorum proposal messages to consume"); + } + } + }, + decide = self.decide_receiver.next() => { + match decide { + Some(decide) => { + if let MessageType::DecideMessage(rdecide_msg) = decide { + let latest_decide_view_num = rdecide_msg.latest_decide_view_number; + tracing::debug!("Received decide msg view {:?} in builder {:?}", + &latest_decide_view_num, + self.parent_block_references); + let decide_status = self.process_decide_event(rdecide_msg).await; + match decide_status{ + Some(Status::ShouldExit) => { + tracing::info!("Exiting builder {:?} with decide view {:?}", + self.parent_block_references, + 
&latest_decide_view_num); + return; + } + Some(Status::ShouldContinue) => { + tracing::debug!("Continuing builder {:?}", + self.parent_block_references); + continue; + } + None => { + tracing::warn!("decide_status was None; Continuing builder {:?}", + self.parent_block_references); + continue; + } + } + } else { + tracing::warn!("Unexpected message on decide channel: {:?}", decide); + } + } + None => { + tracing::warn!("No more decide messages to consume"); + } + } + }, + }; + } + }); + } +} +/// Unifies the possible messages that can be received by the builder +#[derive(Debug, Clone)] +pub enum MessageType { + DecideMessage(DecideMessage), + DaProposalMessage(DaProposalMessage), + QuorumProposalMessage(QuorumProposalMessage), + RequestMessage(RequestMessage), +} + +#[allow(clippy::too_many_arguments)] +impl BuilderState { + pub fn new( + parent_block_references: ParentBlockReferences, + decide_receiver: BroadcastReceiver>, + da_proposal_receiver: BroadcastReceiver>, + quorum_proposal_receiver: BroadcastReceiver>, + req_receiver: BroadcastReceiver>, + tx_receiver: BroadcastReceiver>>, + tx_queue: VecDeque>>, + global_state: Arc>>, + maximize_txn_capture_timeout: Duration, + base_fee: u64, + instance_state: Arc, + txn_garbage_collect_duration: Duration, + validated_state: Arc, + ) -> Self { + let txns_in_queue: HashSet<_> = tx_queue.iter().map(|tx| tx.commit).collect(); + BuilderState { + included_txns: HashSet::new(), + included_txns_old: HashSet::new(), + included_txns_expiring: HashSet::new(), + txns_in_queue, + parent_block_references, + decide_receiver, + da_proposal_receiver, + quorum_proposal_receiver, + req_receiver, + da_proposal_payload_commit_to_da_proposal: HashMap::new(), + quorum_proposal_payload_commit_to_quorum_proposal: HashMap::new(), + tx_receiver, + tx_queue, + global_state, + builder_commitments: HashSet::new(), + maximize_txn_capture_timeout, + base_fee, + instance_state, + txn_garbage_collect_duration, + next_txn_garbage_collect_time: Instant::now() + txn_garbage_collect_duration, + validated_state, + allow_empty_block_until: None, + } + } + pub fn clone_with_receiver(&self, req_receiver: BroadcastReceiver>) -> Self { + // Handle the garbage collection of txns + let ( + included_txns, + included_txns_old, + included_txns_expiring, + next_txn_garbage_collect_time, + ) = if Instant::now() >= self.next_txn_garbage_collect_time { + ( + HashSet::new(), + self.included_txns.clone(), + self.included_txns_old.clone(), + Instant::now() + self.txn_garbage_collect_duration, + ) + } else { + ( + self.included_txns.clone(), + self.included_txns_old.clone(), + self.included_txns_expiring.clone(), + self.next_txn_garbage_collect_time, + ) + }; + + BuilderState { + included_txns, + included_txns_old, + included_txns_expiring, + txns_in_queue: self.txns_in_queue.clone(), + parent_block_references: self.parent_block_references.clone(), + decide_receiver: self.decide_receiver.clone(), + da_proposal_receiver: self.da_proposal_receiver.clone(), + quorum_proposal_receiver: self.quorum_proposal_receiver.clone(), + req_receiver, + da_proposal_payload_commit_to_da_proposal: HashMap::new(), + quorum_proposal_payload_commit_to_quorum_proposal: HashMap::new(), + tx_receiver: self.tx_receiver.clone(), + tx_queue: self.tx_queue.clone(), + global_state: self.global_state.clone(), + builder_commitments: self.builder_commitments.clone(), + maximize_txn_capture_timeout: self.maximize_txn_capture_timeout, + base_fee: self.base_fee, + instance_state: self.instance_state.clone(), + 
txn_garbage_collect_duration: self.txn_garbage_collect_duration, + next_txn_garbage_collect_time, + validated_state: self.validated_state.clone(), + allow_empty_block_until: self.allow_empty_block_until, + } + } + + // collect outstanding transactions + async fn collect_txns(&mut self, timeout_after: Instant) { + while Instant::now() <= timeout_after { + match self.tx_receiver.try_recv() { + Ok(tx) => { + if self.included_txns.contains(&tx.commit) + || self.included_txns_old.contains(&tx.commit) + || self.included_txns_expiring.contains(&tx.commit) + || self.txns_in_queue.contains(&tx.commit) + { + continue; + } + self.txns_in_queue.insert(tx.commit); + self.tx_queue.push_back(tx); + } + Err(async_broadcast::TryRecvError::Empty) + | Err(async_broadcast::TryRecvError::Closed) => { + break; + } + Err(async_broadcast::TryRecvError::Overflowed(lost)) => { + tracing::warn!("Missed {lost} transactions due to backlog"); + continue; + } + } + } + } +} + +#[cfg(test)] +mod test { + use std::collections::HashMap; + + use async_broadcast::broadcast; + use committable::RawCommitmentBuilder; + use hotshot_example_types::block_types::TestTransaction; + use hotshot_example_types::node_types::TestTypes; + use hotshot_types::data::ViewNumber; + use hotshot_types::data::{Leaf, QuorumProposal}; + use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; + use hotshot_types::utils::BuilderCommitment; + use marketplace_builder_shared::testing::constants::TEST_NUM_NODES_IN_VID_COMPUTATION; + use tracing_subscriber::EnvFilter; + + use super::DAProposalInfo; + use super::MessageType; + use super::ParentBlockReferences; + use crate::testing::{calc_builder_commitment, calc_proposal_msg, create_builder_state}; + + /// This test the function `process_da_propsal`. + /// It checkes da_proposal_payload_commit_to_da_proposal change appropriately + /// when receiving a da proposal message. + /// This test also checks whether corresponding BuilderStateId is in global_state. 
+ #[tokio::test] + async fn test_process_da_proposal() { + // Setup logging + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + tracing::info!("Testing the function `process_da_proposal` in `builder_state.rs`"); + + // Number of views to simulate + const NUM_ROUNDS: usize = 5; + // Capacity of broadcast channels + const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; + // Number of nodes on DA committee + const NUM_STORAGE_NODES: usize = TEST_NUM_NODES_IN_VID_COMPUTATION; + + // create builder_state without entering event loop + let (_senders, global_state, mut builder_state) = + create_builder_state(CHANNEL_CAPACITY, NUM_STORAGE_NODES).await; + + // randomly generate a transaction + let transactions = vec![TestTransaction::new(vec![1, 2, 3]); 3]; + let (_quorum_proposal, _quorum_proposal_msg, da_proposal_msg, builder_state_id) = + calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions.clone()).await; + + // sub-test one + // call process_da_proposal without matching quorum proposal message + // da_proposal_payload_commit_to_da_proposal should insert the message + let mut correct_da_proposal_payload_commit_to_da_proposal: HashMap< + (BuilderCommitment, ::View), + DAProposalInfo, + > = HashMap::new(); + let (payload_builder_commitment, da_proposal_info) = + calc_builder_commitment(da_proposal_msg.clone()).await; + + builder_state + .process_da_proposal(da_proposal_msg.clone()) + .await; + correct_da_proposal_payload_commit_to_da_proposal.insert( + ( + payload_builder_commitment, + da_proposal_msg.proposal.data.view_number, + ), + da_proposal_info, + ); + + assert_eq!( + builder_state.da_proposal_payload_commit_to_da_proposal, + correct_da_proposal_payload_commit_to_da_proposal + ); + // check global_state didn't change + if global_state + .read_arc() + .await + .spawned_builder_states + .contains_key(&builder_state_id) + { + panic!("global_state shouldn't have cooresponding builder_state_id without matching quorum proposal."); + } + + // sub-test two + // call process_da_proposal with the same msg again + // we should skip the process and everything should be the same + let transactions_1 = transactions.clone(); + let (_quorum_proposal_1, _quorum_proposal_msg_1, da_proposal_msg_1, builder_state_id_1) = + calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions_1).await; + builder_state + .process_da_proposal(da_proposal_msg_1.clone()) + .await; + assert_eq!( + builder_state.da_proposal_payload_commit_to_da_proposal, + correct_da_proposal_payload_commit_to_da_proposal + ); + // check global_state didn't change + if global_state + .read_arc() + .await + .spawned_builder_states + .contains_key(&builder_state_id_1) + { + panic!("global_state shouldn't have cooresponding builder_state_id without matching quorum proposal."); + } + + // sub-test three + // add the matching quorum proposal message with different tx + // and call process_da_proposal with this matching da proposal message and quorum proposal message + // we should spawn_clone here + // and check whether global_state has correct BuilderStateId + let transactions_2 = vec![TestTransaction::new(vec![1, 2, 3, 4]); 2]; + let (_quorum_proposal_2, quorum_proposal_msg_2, da_proposal_msg_2, builder_state_id_2) = + calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions_2).await; + + // process quorum proposal first, so that later when process_da_proposal we can directly call `build_block` and skip storage + builder_state + .process_quorum_proposal(quorum_proposal_msg_2.clone()) + .await; + 
+ // process da proposal message and do the check + builder_state + .process_da_proposal(da_proposal_msg_2.clone()) + .await; + assert_eq!( + builder_state.da_proposal_payload_commit_to_da_proposal, + correct_da_proposal_payload_commit_to_da_proposal, + ); + // check global_state has this new builder_state_id + if global_state + .read_arc() + .await + .spawned_builder_states + .contains_key(&builder_state_id_2) + { + tracing::debug!("global_state updated successfully"); + } else { + panic!("global_state should have cooresponding builder_state_id as now we have matching quorum proposal."); + } + } + + /// This test the function `process_quorum_propsal`. + /// It checkes quorum_proposal_payload_commit_to_quorum_proposal change appropriately + /// when receiving a quorum proposal message. + /// This test also checks whether corresponding BuilderStateId is in global_state. + #[tokio::test] + async fn test_process_quorum_proposal() { + // Setup logging + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + tracing::info!("Testing the function `process_quorum_proposal` in `builder_state.rs`"); + + // Number of views to simulate + const NUM_ROUNDS: usize = 5; + // Capacity of broadcast channels + const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; + // Number of nodes on DA committee + const NUM_STORAGE_NODES: usize = TEST_NUM_NODES_IN_VID_COMPUTATION; + + // create builder_state without entering event loop + let (_senders, global_state, mut builder_state) = + create_builder_state(CHANNEL_CAPACITY, NUM_STORAGE_NODES).await; + + // randomly generate a transaction + let transactions = vec![TestTransaction::new(vec![1, 2, 3]); 3]; + let (_quorum_proposal, quorum_proposal_msg, _da_proposal_msg, builder_state_id) = + calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions.clone()).await; + + // sub-test one + // call process_quorum_proposal without matching da proposal message + // quorum_proposal_payload_commit_to_quorum_proposal should insert the message + let mut correct_quorum_proposal_payload_commit_to_quorum_proposal = HashMap::new(); + builder_state + .process_quorum_proposal(quorum_proposal_msg.clone()) + .await; + correct_quorum_proposal_payload_commit_to_quorum_proposal.insert( + ( + quorum_proposal_msg + .proposal + .data + .block_header + .builder_commitment + .clone(), + quorum_proposal_msg.proposal.data.view_number, + ), + quorum_proposal_msg.proposal, + ); + assert_eq!( + builder_state + .quorum_proposal_payload_commit_to_quorum_proposal + .clone(), + correct_quorum_proposal_payload_commit_to_quorum_proposal.clone() + ); + // check global_state didn't change + if global_state + .read_arc() + .await + .spawned_builder_states + .contains_key(&builder_state_id) + { + panic!("global_state shouldn't have cooresponding builder_state_id without matching quorum proposal."); + } + + // sub-test two + // add the matching da proposal message with different tx + // and call process_da_proposal with this matching quorum proposal message and quorum da message + // we should spawn_clone here + // and check whether global_state has correct BuilderStateId + let transactions_2 = vec![TestTransaction::new(vec![2, 3, 4]); 2]; + let (_quorum_proposal_2, quorum_proposal_msg_2, da_proposal_msg_2, builder_state_id_2) = + calc_proposal_msg(NUM_STORAGE_NODES, 0, None, transactions_2).await; + + // process da proposal message first, so that later when process_da_proposal we can directly call `build_block` and skip storage + builder_state + 
.process_da_proposal(da_proposal_msg_2.clone()) + .await; + + // process quorum proposal, and do the check + builder_state + .process_quorum_proposal(quorum_proposal_msg_2.clone()) + .await; + + assert_eq!( + builder_state + .quorum_proposal_payload_commit_to_quorum_proposal + .clone(), + correct_quorum_proposal_payload_commit_to_quorum_proposal.clone() + ); + + // check global_state has this new builder_state_id + if global_state + .read_arc() + .await + .spawned_builder_states + .contains_key(&builder_state_id_2) + { + tracing::debug!("global_state updated successfully"); + } else { + panic!("global_state should have cooresponding builder_state_id as now we have matching da proposal."); + } + } + + /// This test the function `process_decide_event`. + /// It checkes whether we exit out correct builder states when there's a decide event coming in. + /// This test also checks whether corresponding BuilderStateId is removed in global_state. + #[tokio::test] + async fn test_process_decide_event() { + // Setup logging + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + tracing::info!("Testing the builder core with multiple messages from the channels"); + + // Number of views to simulate + const NUM_ROUNDS: usize = 5; + // Number of transactions to submit per round + const NUM_TXNS_PER_ROUND: usize = 4; + // Capacity of broadcast channels + const CHANNEL_CAPACITY: usize = NUM_ROUNDS * 5; + // Number of nodes on DA committee + const NUM_STORAGE_NODES: usize = TEST_NUM_NODES_IN_VID_COMPUTATION; + + // create builder_state without entering event loop + let (_senders, global_state, mut builder_state) = + create_builder_state(CHANNEL_CAPACITY, NUM_STORAGE_NODES).await; + + // Transactions to send + let all_transactions = (0..NUM_ROUNDS) + .map(|round| { + (0..NUM_TXNS_PER_ROUND) + .map(|tx_num| TestTransaction::new(vec![round as u8, tx_num as u8])) + .collect::>() + }) + .collect::>(); + let mut prev_quorum_proposal: Option> = None; + // register some builder states for later decide event + #[allow(clippy::needless_range_loop)] + for round in 0..NUM_ROUNDS { + let transactions = all_transactions[round].clone(); + let (quorum_proposal, _quorum_proposal_msg, _da_proposal_msg, builder_state_id) = + calc_proposal_msg(NUM_STORAGE_NODES, round, prev_quorum_proposal, transactions) + .await; + prev_quorum_proposal = Some(quorum_proposal.clone()); + let (req_sender, _req_receiver) = broadcast(CHANNEL_CAPACITY); + let leaf: Leaf = Leaf::from_quorum_proposal(&quorum_proposal); + let leaf_commit = RawCommitmentBuilder::new("leaf commitment") + .u64_field("view number", leaf.view_number().u64()) + .u64_field("block number", leaf.height()) + .field("parent Leaf commitment", leaf.parent_commitment()) + .var_size_field( + "block payload commitment", + leaf.payload_commitment().as_ref(), + ) + .finalize(); + global_state.write_arc().await.register_builder_state( + builder_state_id, + ParentBlockReferences { + view_number: quorum_proposal.view_number, + vid_commitment: quorum_proposal.block_header.payload_commitment, + leaf_commit, + builder_commitment: quorum_proposal.block_header.builder_commitment, + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender, + ); + } + + // send out a decide event in a middle round + let latest_decide_view_number = ViewNumber::new(3); + + let decide_message = MessageType::DecideMessage(crate::builder_state::DecideMessage { + latest_decide_view_number, + }); + if let 
+            builder_state
+                .process_decide_event(practice_decide_msg.clone())
+                .await;
+        } else {
+            panic!("Not a decide_message in correct format");
+        }
+        // check that spawned_builder_states only keeps builder_state_ids not older than the decide, i.e. older builder states have already exited
+        let current_spawned_builder_states =
+            global_state.read_arc().await.spawned_builder_states.clone();
+        current_spawned_builder_states
+            .iter()
+            .for_each(|(builder_state_id, _)| {
+                assert!(builder_state_id.parent_view >= latest_decide_view_number)
+            });
+    }
+}
diff --git a/crates/legacy/src/old/lib.rs b/crates/legacy/src/old/lib.rs
new file mode 100644
index 00000000..09cf17a1
--- /dev/null
+++ b/crates/legacy/src/old/lib.rs
@@ -0,0 +1,88 @@
+// Copyright (c) 2024 Espresso Systems (espressosys.com)
+// This file is part of the HotShot Builder Protocol.
+//
+
+// Builder Phase 1
+// It mainly provides three API services to hotshot proposers:
+// 1. Serves a proposer(leader)'s request to provide blocks information
+// 2. Serves a proposer(leader)'s request to provide the full blocks information
+// 3. Serves a proposer(leader)'s request to provide the block header information
+
+// It also provides one API service to external users:
+// 1. Serves a user's request to submit a private transaction
+
+// providing the core services to support the above API services
+pub mod builder_state;
+
+// Core interaction with the HotShot network
+pub mod service;
+
+// tracking the testing
+#[cfg(test)]
+pub mod testing;
+
+use hotshot_builder_api::v0_1::builder::BuildError;
+use hotshot_types::traits::node_implementation::NodeType;
+use tokio::sync::mpsc::UnboundedReceiver;
+
+/// `WaitAndKeep` is a helper enum that allows for the lazy polling of a single
+/// value from an unbounded receiver.
+#[derive(Debug)]
+pub enum WaitAndKeep<T> {
+    Keep(T),
+    Wait(UnboundedReceiver<T>),
+}
+
+#[derive(Debug)]
+pub(crate) enum WaitAndKeepGetError {
+    FailedToResolvedVidCommitmentFromChannel,
+}
+
+impl From<WaitAndKeepGetError> for BuildError {
+    fn from(e: WaitAndKeepGetError) -> Self {
+        match e {
+            WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel => {
+                BuildError::Error("failed to resolve VidCommitment from channel".to_string())
+            }
+        }
+    }
+}
+
+impl<T: Clone> WaitAndKeep<T> {
+    /// get will return a clone of the value that is already stored within the
+    /// value of `WaitAndKeep::Keep` if the value is already resolved. Otherwise
+    /// it will poll the next value from the channel and replace the locally
+    /// stored `WaitAndKeep::Wait` with the resolved value as a `WaitAndKeep::Keep`.
+    ///
+    /// Note: This pattern seems very similar to a Future, and ultimately
+    /// returns a future. It's not clear why this needs to be implemented
+    /// in such a way and not just implemented as a boxed future.
+    pub(crate) async fn get(&mut self) -> Result<T, WaitAndKeepGetError> {
+        match self {
+            WaitAndKeep::Keep(t) => Ok(t.clone()),
+            WaitAndKeep::Wait(fut) => {
+                let got = fut
+                    .recv()
+                    .await
+                    .ok_or(WaitAndKeepGetError::FailedToResolvedVidCommitmentFromChannel);
+                if let Ok(got) = &got {
+                    let mut replace = WaitAndKeep::Keep(got.clone());
+                    core::mem::swap(self, &mut replace);
+                }
+                got
+            }
+        }
+    }
+}
+
+// TODO: Update commitment calculation with the new `commit`.
+// +trait LegacyCommit { + fn legacy_commit(&self) -> committable::Commitment>; +} + +impl LegacyCommit for hotshot_types::data::Leaf { + fn legacy_commit(&self) -> committable::Commitment> { + as committable::Committable>::commit(self) + } +} diff --git a/crates/legacy/src/old/service.rs b/crates/legacy/src/old/service.rs new file mode 100644 index 00000000..bd6e516f --- /dev/null +++ b/crates/legacy/src/old/service.rs @@ -0,0 +1,4953 @@ +use hotshot::types::Event; +use hotshot_builder_api::{ + v0_1::{ + block_info::{AvailableBlockData, AvailableBlockHeaderInput, AvailableBlockInfo}, + builder::BuildError, + data_source::{AcceptsTxnSubmits, BuilderDataSource}, + }, + v0_2::builder::TransactionStatus, +}; +use hotshot_types::{ + data::{DaProposal, Leaf, QuorumProposal}, + event::EventType, + message::Proposal, + traits::{ + block_contents::{BlockPayload, Transaction}, + node_implementation::{ConsensusTime, NodeType}, + signature_key::{BuilderSignatureKey, SignatureKey}, + }, + utils::BuilderCommitment, + vid::{VidCommitment, VidPrecomputeData}, +}; +use lru::LruCache; +use vbs::version::StaticVersionType; + +use marketplace_builder_shared::block::{BlockId, BuilderStateId, ParentBlockReferences}; + +use crate::builder_state::{MessageType, RequestMessage, ResponseMessage}; +use crate::{ + builder_state::{ + BuildBlockInfo, DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, + TriggerStatus, + }, + implementation::LegacyCommit as _, +}; +use crate::{WaitAndKeep, WaitAndKeepGetError}; +pub use async_broadcast::{broadcast, RecvError, TryRecvError}; +use async_broadcast::{Sender as BroadcastSender, TrySendError}; +use async_lock::RwLock; +use async_trait::async_trait; +use committable::{Commitment, Committable}; +use futures::stream::StreamExt; +use futures::{future::BoxFuture, Stream}; +use sha2::{Digest, Sha256}; +use std::collections::HashMap; +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Duration; +use std::{fmt::Display, time::Instant}; +use tagged_base64::TaggedBase64; +use tide_disco::method::ReadState; +use tokio::{ + sync::{mpsc::unbounded_channel, oneshot}, + time::{sleep, timeout}, +}; + +// We will not increment max block value if we aren't able to serve a response +// with a margin below [`ProxyGlobalState::max_api_waiting_time`] +// more than [`ProxyGlobalState::max_api_waiting_time`] / `VID_RESPONSE_TARGET_MARGIN_DIVISOR` +const VID_RESPONSE_TARGET_MARGIN_DIVISOR: u32 = 10; + +// It holds all the necessary information for a block +#[derive(Debug)] +pub struct BlockInfo { + pub block_payload: Types::BlockPayload, + pub metadata: <::BlockPayload as BlockPayload>::Metadata, + pub vid_trigger: Arc>>>, + pub vid_receiver: Arc>>, + pub offered_fee: u64, + // Could we have included more transactions with this block, but chose not to? + pub truncated: bool, +} + +/// [`ReceivedTransaction`] represents receipt information concerning a received +/// [`NodeType::Transaction`]. 
+#[derive(Debug)] +pub struct ReceivedTransaction { + // the transaction + pub tx: Types::Transaction, + // transaction's hash + pub commit: Commitment, + // transaction's esitmated length + pub len: u64, + // transaction's source + pub source: TransactionSource, + // received time + pub time_in: Instant, +} + +/// Adjustable limits for block size ceiled by +/// maximum block size allowed by the protocol +#[derive(Debug, Clone)] +pub struct BlockSizeLimits { + // maximum block size allowed by the protocol + pub protocol_max_block_size: u64, + // estimated maximum block size we can build in time + pub max_block_size: u64, + pub increment_period: Duration, + pub last_block_size_increment: Instant, +} + +impl BlockSizeLimits { + /// Never go lower than 10 kilobytes + pub const MAX_BLOCK_SIZE_FLOOR: u64 = 10_000; + /// When adjusting max block size, it will be decremented or incremented + /// by current value / `MAX_BLOCK_SIZE_CHANGE_DIVISOR` + pub const MAX_BLOCK_SIZE_CHANGE_DIVISOR: u64 = 10; + + pub fn new(protocol_max_block_size: u64, increment_period: Duration) -> Self { + Self { + protocol_max_block_size, + max_block_size: protocol_max_block_size, + increment_period, + last_block_size_increment: Instant::now(), + } + } + + /// If increment period has elapsed or `force` flag is set, + /// increment [`Self::max_block_size`] by current value * [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`] + /// with [`Self::protocol_max_block_size`] as a ceiling + pub fn try_increment_block_size(&mut self, force: bool) { + if force || self.last_block_size_increment.elapsed() >= self.increment_period { + self.max_block_size = std::cmp::min( + self.max_block_size + + self + .max_block_size + .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR), + self.protocol_max_block_size, + ); + self.last_block_size_increment = Instant::now(); + } + } + + /// Decrement [`Self::max_block_size`] by current value * [`Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR`] + /// with [`Self::MAX_BLOCK_SIZE_FLOOR`] as a floor + pub fn decrement_block_size(&mut self) { + self.max_block_size = std::cmp::max( + self.max_block_size + - self + .max_block_size + .div_ceil(Self::MAX_BLOCK_SIZE_CHANGE_DIVISOR), + Self::MAX_BLOCK_SIZE_FLOOR, + ); + } +} + +/// [`GlobalState`] represents the internalized state of the Builder service as +/// represented from its public facing API. +#[allow(clippy::type_complexity)] +#[derive(Debug)] +pub struct GlobalState { + // data store for the blocks + pub blocks: lru::LruCache, BlockInfo>, + + // registered builder states + pub spawned_builder_states: HashMap< + BuilderStateId, + ( + // This is provided as an Option for convenience with initialization. + // When we build the initial state, we don't necessarily want to + // have to generate a valid ParentBlockReferences object. As doing + // such would require a bit of setup. Additionally it would + // result in the call signature to `GlobalState::new` changing. + // However for every subsequent BuilderState, we expect this value + // to be populated. 
+ Option>, + BroadcastSender>, + ), + >, + + // builder state -> last built block , it is used to respond the client + // if the req channel times out during get_available_blocks + pub builder_state_to_last_built_block: HashMap, ResponseMessage>, + + // sending a transaction from the hotshot/private mempool to the builder states + // NOTE: Currently, we don't differentiate between the transactions from the hotshot and the private mempool + pub tx_sender: BroadcastSender>>, + + // last garbage collected view number + pub last_garbage_collected_view_num: Types::View, + + // highest view running builder task + pub highest_view_num_builder_id: BuilderStateId, + + pub block_size_limits: BlockSizeLimits, + + // A mapping from transaction hash to its status + pub tx_status: RwLock, TransactionStatus>>, + + /// Number of nodes. + /// + /// Initial value may be updated by the `claim_block_with_num_nodes` endpoint. + pub num_nodes: usize, +} + +/// `GetChannelForMatchingBuilderError` is an error enum that represents the +/// class of possible errors that can be returned when calling +/// `get_channel_for_matching_builder_or_highest_view_builder` on a +/// `GlobalState`. These errors are used for internal representations for +/// consistency and testing, and do not leak beyond the `GlobalState` API. +/// As such, they intentionally do not implement traits for serialization. +#[derive(Debug)] +pub(crate) enum GetChannelForMatchingBuilderError { + NoBuilderStateFound, +} + +impl From for BuildError { + fn from(_error: GetChannelForMatchingBuilderError) -> Self { + BuildError::Error("No builder state found".to_string()) + } +} + +impl GlobalState { + /// Creates a new [`GlobalState`] with the given parameters. + /// The resulting [`GlobalState`] will have the given + /// `last_garbage_collected_view_num` as passed. Additionally, the + /// `highest_view_num_builder_id` will be set to a [`BuilderStateId`] + /// comprised of the given `bootstrapped_builder_state_id` and + /// `bootstrapped_view_num`. The `spawned_builder_states` will be created + /// with a single entry of the same [`BuilderStateId`] and the given + /// `bootstrap_sender`. + /// `protocol_max_block_size` is maximum block size allowed by the protocol, + /// e.g. `chain_config.max_block_size` for espresso-sequencer. + /// `max_block_size_increment_period` determines the interval between attempts + /// to increase the builder's block size limit if it is less than the protocol maximum. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + bootstrap_sender: BroadcastSender>, + tx_sender: BroadcastSender>>, + bootstrapped_builder_state_id: VidCommitment, + bootstrapped_view_num: Types::View, + last_garbage_collected_view_num: Types::View, + max_block_size_increment_period: Duration, + protocol_max_block_size: u64, + num_nodes: usize, + max_txn_num: usize, + ) -> Self { + let mut spawned_builder_states = HashMap::new(); + let bootstrap_id = BuilderStateId { + parent_commitment: bootstrapped_builder_state_id, + parent_view: bootstrapped_view_num, + }; + spawned_builder_states.insert(bootstrap_id.clone(), (None, bootstrap_sender.clone())); + GlobalState { + blocks: LruCache::new(NonZeroUsize::new(256).unwrap()), + spawned_builder_states, + tx_sender, + last_garbage_collected_view_num, + builder_state_to_last_built_block: Default::default(), + highest_view_num_builder_id: bootstrap_id, + block_size_limits: BlockSizeLimits::new( + protocol_max_block_size, + max_block_size_increment_period, + ), + tx_status: RwLock::new(LruCache::new( + NonZeroUsize::new(max_txn_num).expect("max_txn_num must be greater than zero "), + )), + num_nodes, + } + } + + /// Associates the given [`BuilderStateId`] with + /// the given [`BroadcastSender`] in the [`GlobalState`]. + /// + /// Additionally, if the view of the [`BuilderStateId`] is greater than the + /// current highest view number, the [`BuilderStateId`] is set as the new + /// highest view number. + /// + /// There is potential here for data loss. Since we just blindly insert + /// the [`BuilderStateId`] and [`BroadcastSender`] into the hashmap, we could + /// potentially be overwriting an existing entry. This would result in + /// the loss of access to a [`BroadcastSender`], and could potentially + /// result in unexpected behavior. + pub fn register_builder_state( + &mut self, + parent_id: BuilderStateId, + built_from_proposed_block: ParentBlockReferences, + request_sender: BroadcastSender>, + ) { + // register the builder state + let previous_value = self.spawned_builder_states.insert( + parent_id.clone(), + (Some(built_from_proposed_block), request_sender), + ); + + if let Some(previous_value) = previous_value { + tracing::warn!( + "builder {parent_id} overwrote previous spawned_builder_state entry: {:?}", + previous_value + ); + } + + // keep track of the max view number + if parent_id.parent_view > self.highest_view_num_builder_id.parent_view { + tracing::info!("registering builder {parent_id} as highest",); + self.highest_view_num_builder_id = parent_id; + } else { + tracing::warn!( + "builder {parent_id} created; highest registered is {}", + self.highest_view_num_builder_id, + ); + } + } + + /// Ensures that the given [`BuildBlockInfo`]'d id + /// is within the [`GlobalState`]'s [`blocks`](GlobalState::blocks) LRU Cache. The cache stores the + /// [`BlockInfo`] associated with the given [`BuildBlockInfo`]'s id. However + /// if it already exists within the LRU cache, then the `BlockInfo` is not + /// updated. + /// + /// Additionally, the [`BuilderStateId`] is associated with the given + /// [`ResponseMessage`] in the [`Self::builder_state_to_last_built_block`] hashmap. + /// + /// No care or consideration is given to anything that may have been + /// stored with the same key in the [`Self::builder_state_to_last_built_block`]. 
+ pub fn update_global_state( + &mut self, + state_id: BuilderStateId, + build_block_info: BuildBlockInfo, + response_msg: ResponseMessage, + ) { + let BuildBlockInfo { + id, + block_payload, + metadata, + vid_trigger, + vid_receiver, + offered_fee, + truncated, + .. + } = build_block_info; + + let previous_cache_entry = self.blocks.put( + id.clone(), + BlockInfo { + block_payload, + metadata, + vid_trigger: Arc::new(RwLock::new(Some(vid_trigger))), + vid_receiver: Arc::new(RwLock::new(WaitAndKeep::Wait(vid_receiver))), + offered_fee, + truncated, + }, + ); + + // update the builder state to last built block + let previous_builder_state_entry = self + .builder_state_to_last_built_block + .insert(state_id, response_msg); + + if let Some(previous_builder_state_entry) = previous_builder_state_entry { + tracing::warn!( + "block {id} overwrote previous block: {:?}. previous cache entry: {:?}", + previous_builder_state_entry, + previous_cache_entry + ); + } + } + + /// Cleans up the [`GlobalState`] by removing all + /// `spawned_builder_states` that have been stored, up to a derived + /// reference view. This cutoff point can be up to the given + /// `on_decide_view` so long as the provided value is less than or equal + /// to the `highest_view_num_builder_id`'s view stored on the state. + /// Beyond that, the state prefers to drop all `spawned_builder_states` + /// preceding the derived cutoff view. + /// + /// In addition the `last_garbage_collected_view_num` is updated to the + /// target cutoff view number for tracking purposes. The value returned + /// is the cutoff view number such that the returned value indicates the + /// point before which everything was cleaned up. + pub fn remove_handles(&mut self, on_decide_view: Types::View) -> Types::View { + // remove everything from the spawned builder states when view_num <= on_decide_view; + // if we don't have a highest view > decide, use highest view as cutoff. + let cutoff = std::cmp::min(self.highest_view_num_builder_id.parent_view, on_decide_view); + self.spawned_builder_states + .retain(|id, _| id.parent_view >= cutoff); + + let cutoff_u64 = cutoff.u64(); + let gc_view = if cutoff_u64 > 0 { cutoff_u64 - 1 } else { 0 }; + + self.last_garbage_collected_view_num = Types::View::new(gc_view); + + cutoff + } + + // private mempool submit txn + // Currently, we don't differentiate between the transactions from the hotshot and the private mempool + pub async fn submit_client_txns( + &self, + txns: Vec<::Transaction>, + ) -> Vec::Transaction>, BuildError>> { + handle_received_txns( + &self.tx_sender, + txns, + TransactionSource::External, + self.block_size_limits.max_block_size, + ) + .await + } + + // get transaction status + // return one of "pending", "sequenced", "rejected" or "unknown" + pub async fn txn_status( + &self, + txn_hash: Commitment<::Transaction>, + ) -> Result { + if let Some(status) = self.tx_status.write().await.get(&txn_hash) { + Ok(status.clone()) + } else { + Ok(TransactionStatus::Unknown) + } + } + + pub async fn set_txn_status( + &mut self, + txn_hash: Commitment<::Transaction>, + txn_status: TransactionStatus, + ) -> Result<(), BuildError> { + let mut write_guard = self.tx_status.write().await; + if write_guard.contains(&txn_hash) { + let old_status = write_guard.get(&txn_hash); + match old_status { + Some(TransactionStatus::Rejected { reason }) => { + tracing::debug!("Changing the status of a rejected transaction to status {:?}! 
The reason it is previously rejected is {:?}", txn_status, reason); + } + Some(TransactionStatus::Sequenced { leaf }) => { + let e = format!("Changing the status of a sequenced transaction to status {:?} is not allowed! The transaction is sequenced in leaf {:?}", txn_status, leaf); + tracing::error!(e); + return Err(BuildError::Error(e)); + } + _ => { + tracing::debug!( + "change status of transaction {txn_hash} from {:?} to {:?}", + old_status, + txn_status + ); + } + } + } else { + tracing::debug!( + "insert status of a first-seen transaction {txn_hash} : {:?}", + txn_status + ); + } + write_guard.put(txn_hash, txn_status); + Ok(()) + } + + /// Helper function that attempts to retrieve the broadcast sender for the given + /// [`BuilderStateId`]. If the sender does not exist, it will return the + /// broadcast sender for the for the hightest view number [`BuilderStateId`] + /// instead. + pub(crate) fn get_channel_for_matching_builder_or_highest_view_builder( + &self, + key: &BuilderStateId, + ) -> Result<&BroadcastSender>, GetChannelForMatchingBuilderError> { + if let Some(id_and_sender) = self.spawned_builder_states.get(key) { + tracing::info!("Got matching builder for parent {}", key); + Ok(&id_and_sender.1) + } else { + tracing::warn!( + "failed to recover builder for parent {}, using highest view num builder with {}", + key, + self.highest_view_num_builder_id, + ); + // get the sender for the highest view number builder + self.spawned_builder_states + .get(&self.highest_view_num_builder_id) + .map(|(_, sender)| sender) + .ok_or(GetChannelForMatchingBuilderError::NoBuilderStateFound) + } + } + + // check for the existence of the builder state for a view + pub fn check_builder_state_existence_for_a_view(&self, key: &Types::View) -> bool { + // iterate over the spawned builder states and check if the view number exists + self.spawned_builder_states + .iter() + .any(|(id, _)| id.parent_view == *key) + } + + pub fn should_view_handle_other_proposals( + &self, + builder_view: &Types::View, + proposal_view: &Types::View, + ) -> bool { + *builder_view == self.highest_view_num_builder_id.parent_view + && !self.check_builder_state_existence_for_a_view(proposal_view) + } +} + +#[derive(derive_more::Deref, derive_more::DerefMut)] +pub struct ProxyGlobalState { + #[deref(forward)] + #[deref_mut(forward)] + // global state + global_state: Arc>>, + + // identity keys for the builder + // May be ideal place as GlobalState interacts with hotshot apis + // and then can sign on responders as desired + builder_keys: ( + Types::BuilderSignatureKey, // pub key + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, // private key + ), + + // max waiting time to serve first api request + max_api_waiting_time: Duration, +} + +impl ProxyGlobalState { + pub fn new( + global_state: Arc>>, + builder_keys: ( + Types::BuilderSignatureKey, + <::BuilderSignatureKey as BuilderSignatureKey>::BuilderPrivateKey, + ), + max_api_waiting_time: Duration, + ) -> Self { + ProxyGlobalState { + global_state, + builder_keys, + max_api_waiting_time, + } + } +} + +/// `AvailableBlocksError` is an error enum that represents the class of possible +/// errors that can be returned when calling `available_blocks` on a +/// `ProxyGlobalState`. These errors are used for internal representations +/// for consistency and testing, and do not leak beyond the `ProxyGlobalState` +/// API. As such, they intentionally do not implement traits for serialization. 
+#[derive(Debug)] +enum AvailableBlocksError { + SignatureValidationFailed, + RequestForAvailableViewThatHasAlreadyBeenDecided, + SigningBlockFailed( + <::BuilderSignatureKey as BuilderSignatureKey>::SignError, + ), + GetChannelForMatchingBuilderError(GetChannelForMatchingBuilderError), + NoBlocksAvailable, + ChannelUnexpectedlyClosed, +} + +impl From for AvailableBlocksError { + fn from(error: GetChannelForMatchingBuilderError) -> Self { + AvailableBlocksError::GetChannelForMatchingBuilderError(error) + } +} + +impl From> for BuildError { + fn from(error: AvailableBlocksError) -> Self { + match error { + AvailableBlocksError::SignatureValidationFailed => { + BuildError::Error("Signature validation failed in get_available_blocks".to_string()) + } + AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided => { + BuildError::Error( + "Request for available blocks for a view that has already been decided." + .to_string(), + ) + } + AvailableBlocksError::SigningBlockFailed(e) => { + BuildError::Error(format!("Signing over block info failed: {:?}", e)) + } + AvailableBlocksError::GetChannelForMatchingBuilderError(e) => e.into(), + AvailableBlocksError::NoBlocksAvailable => { + BuildError::Error("No blocks available".to_string()) + } + AvailableBlocksError::ChannelUnexpectedlyClosed => { + BuildError::Error("Channel unexpectedly closed".to_string()) + } + } + } +} + +/// `ClaimBlockError` is an error enum that represents the class of possible +/// errors that can be returned when calling `claim_block` on a +/// `ProxyGlobalState`. These errors are used for internal representations +/// for consistency and testing, and do not leak beyond the `ProxyGlobalState` +/// API. As such, they intentionally do not implement traits for serialization. +#[derive(Debug)] +enum ClaimBlockError { + SignatureValidationFailed, + SigningCommitmentFailed( + <::BuilderSignatureKey as BuilderSignatureKey>::SignError, + ), + BlockDataNotFound, +} + +impl From> for BuildError { + fn from(error: ClaimBlockError) -> Self { + match error { + ClaimBlockError::SignatureValidationFailed => { + BuildError::Error("Signature validation failed in claim block".to_string()) + } + ClaimBlockError::SigningCommitmentFailed(e) => { + BuildError::Error(format!("Signing over builder commitment failed: {:?}", e)) + } + ClaimBlockError::BlockDataNotFound => { + BuildError::Error("Block data not found".to_string()) + } + } + } +} + +#[derive(Debug)] +enum ClaimBlockHeaderInputError { + SignatureValidationFailed, + BlockHeaderNotFound, + CouldNotGetVidInTime, + WaitAndKeepGetError(WaitAndKeepGetError), + FailedToSignVidCommitment( + <::BuilderSignatureKey as BuilderSignatureKey>::SignError, + ), + FailedToSignFeeInfo( + <::BuilderSignatureKey as BuilderSignatureKey>::SignError, + ), +} + +impl From> for BuildError { + fn from(error: ClaimBlockHeaderInputError) -> Self { + match error { + ClaimBlockHeaderInputError::SignatureValidationFailed => BuildError::Error( + "Signature validation failed in claim block header input".to_string(), + ), + ClaimBlockHeaderInputError::BlockHeaderNotFound => { + BuildError::Error("Block header not found".to_string()) + } + ClaimBlockHeaderInputError::CouldNotGetVidInTime => { + BuildError::Error("Couldn't get vid in time".to_string()) + } + ClaimBlockHeaderInputError::WaitAndKeepGetError(e) => e.into(), + ClaimBlockHeaderInputError::FailedToSignVidCommitment(e) => { + BuildError::Error(format!("Failed to sign VID commitment: {:?}", e)) + } + ClaimBlockHeaderInputError::FailedToSignFeeInfo(e) => 
{ + BuildError::Error(format!("Failed to sign fee info: {:?}", e)) + } + } + } +} + +impl ProxyGlobalState { + async fn available_blocks_implementation( + &self, + for_parent: &VidCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &::PureAssembledSignatureType, + ) -> Result>, AvailableBlocksError> { + let starting_time = Instant::now(); + + let state_id = BuilderStateId { + parent_commitment: *for_parent, + parent_view: Types::View::new(view_number), + }; + + // verify the signature + if !sender.validate(signature, state_id.parent_commitment.as_ref()) { + tracing::error!("Signature validation failed in get_available_blocks"); + return Err(AvailableBlocksError::SignatureValidationFailed); + } + + tracing::info!("Requesting available blocks for {state_id}",); + + let view_num = state_id.parent_view; + // check in the local spawned builder states + // if it doesn't exist; there are three cases + // 1) it has already been garbage collected (view < decide) and we should return an error + // 2) it has not yet been created, and we should try to wait + // 3) we missed the triggering event, and should use the BuilderState with the highest available view + + { + // 1st case: Decide event received, and not bootstrapping. + // If this `BlockBuilder` hasn't been reaped, it should have been. + let global_state = self.global_state.read_arc().await; + if view_num < global_state.last_garbage_collected_view_num + && global_state.highest_view_num_builder_id.parent_view + != global_state.last_garbage_collected_view_num + { + tracing::warn!( + "Requesting for view {:?}, last decide-triggered cleanup on view {:?}, highest view num is {:?}", + view_num, + global_state.last_garbage_collected_view_num, + global_state.highest_view_num_builder_id.parent_view + ); + return Err(AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided); + } + } + + let (response_sender, mut response_receiver) = unbounded_channel(); + let req_msg = RequestMessage { + state_id: state_id.clone(), + response_channel: response_sender, + }; + let timeout_after = starting_time + self.max_api_waiting_time; + let check_duration = self.max_api_waiting_time / 10; + + let time_to_wait_for_matching_builder = starting_time + self.max_api_waiting_time / 2; + + let mut sent = false; + while Instant::now() < time_to_wait_for_matching_builder { + // try to broadcast the request to the correct builder state + let found_builder_state = { + let global_state_read_lock_guard = self.global_state.read_arc().await; + + global_state_read_lock_guard + .spawned_builder_states + .get(&state_id) + .cloned() + }; + + if let Some(id_and_sender) = found_builder_state { + tracing::info!( + "Got matching BlockBuilder for {state_id}, sending get_available_blocks request", + ); + + if let Err(e) = id_and_sender + .1 + .broadcast(MessageType::RequestMessage(req_msg.clone())) + .await + { + tracing::warn!("Error {e} sending get_available_blocks request for {state_id}",); + } + sent = true; + break; + } + + tracing::info!("Failed to get matching BlockBuilder for {state_id}, will try again",); + sleep(check_duration).await; + } + + if !sent { + // broadcast the request to the best fallback builder state + if let Err(e) = self + .global_state + .read_arc() + .await + .get_channel_for_matching_builder_or_highest_view_builder(&state_id)? 
+ .broadcast(MessageType::RequestMessage(req_msg.clone())) + .await + { + tracing::warn!( + "Error {e} sending get_available_blocks request for parent {state_id}", + ); + } + } + + tracing::debug!("Waiting for response for get_available_blocks with parent {state_id}",); + + let response_received = loop { + match timeout(check_duration, response_receiver.recv()).await { + Err(toe) => { + if Instant::now() >= timeout_after { + tracing::debug!(%toe, "Couldn't get available blocks in time for parent {state_id}"); + // lookup into the builder_state_to_last_built_block, if it contains the result, return that otherwise return error + if let Some(last_built_block) = self + .global_state + .read_arc() + .await + .builder_state_to_last_built_block + .get(&state_id) + { + tracing::info!("Returning last built block for parent {state_id}",); + break Ok(last_built_block.clone()); + } + break Err(AvailableBlocksError::NoBlocksAvailable); + } + continue; + } + Ok(recv_attempt) => { + if recv_attempt.is_none() { + tracing::error!( + "Channel closed while getting available blocks for parent {state_id}" + ); + } + break recv_attempt + .ok_or_else(|| AvailableBlocksError::ChannelUnexpectedlyClosed); + } + } + }; + + match response_received { + Ok(response) => { + let (pub_key, sign_key) = self.builder_keys.clone(); + // sign over the block info + let signature_over_block_info = + ::BuilderSignatureKey::sign_block_info( + &sign_key, + response.block_size, + response.offered_fee, + &response.builder_hash, + ) + .map_err(AvailableBlocksError::SigningBlockFailed)?; + + // insert the block info into local hashmap + let initial_block_info = AvailableBlockInfo:: { + block_hash: response.builder_hash.clone(), + block_size: response.block_size, + offered_fee: response.offered_fee, + signature: signature_over_block_info, + sender: pub_key.clone(), + _phantom: Default::default(), + }; + tracing::info!( + "Sending available Block info response for {state_id} with block hash: {:?}", + response.builder_hash + ); + Ok(vec![initial_block_info]) + } + + // We failed to get available blocks + Err(e) => { + tracing::debug!("Failed to get available blocks for parent {state_id}",); + Err(e) + } + } + } + + async fn claim_block_implementation( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, ClaimBlockError> { + let block_id = BlockId { + hash: block_hash.clone(), + view: Types::View::new(view_number), + }; + + tracing::info!("Received request for claiming block {block_id}",); + // verify the signature + if !sender.validate(signature, block_id.hash.as_ref()) { + tracing::error!("Signature validation failed in claim block"); + return Err(ClaimBlockError::SignatureValidationFailed); + } + let (pub_key, sign_key) = self.builder_keys.clone(); + + let extracted_block_info_option = { + // We store this write lock guard separately to make it explicit + // that this will end up holding a lock for the duration of this + // closure. + // + // Additionally, we clone the properties from the block_info that + // end up being cloned if found anyway. Since we know this already + // we can perform the clone here to avoid holding the lock for + // longer than needed. 
+ let mut global_state_write_lock_guard = self.global_state.write_arc().await; + let block_info_some = global_state_write_lock_guard.blocks.get(&block_id); + + block_info_some.map(|block_info| { + ( + block_info.vid_trigger.clone(), + block_info.block_payload.clone(), + block_info.metadata.clone(), + ) + }) + }; + + if let Some((vid_trigger, block_payload, metadata)) = extracted_block_info_option { + tracing::info!("Trying sending vid trigger info for {block_id}",); + + if let Some(trigger_writer) = vid_trigger.write().await.take() { + tracing::info!("Sending vid trigger for {block_id}"); + let _ = trigger_writer.send(TriggerStatus::Start); + tracing::info!("Sent vid trigger for {block_id}"); + } + tracing::info!("Done Trying sending vid trigger info for {block_id}",); + + // sign over the builder commitment, as the proposer can computer it based on provide block_payload + // and the metadata + let response_block_hash = block_payload.builder_commitment(&metadata); + let signature_over_builder_commitment = + ::BuilderSignatureKey::sign_builder_message( + &sign_key, + response_block_hash.as_ref(), + ) + .map_err(ClaimBlockError::SigningCommitmentFailed)?; + + let block_data = AvailableBlockData:: { + block_payload: block_payload.clone(), + metadata: metadata.clone(), + signature: signature_over_builder_commitment, + sender: pub_key.clone(), + }; + tracing::info!("Sending Claim Block data for {block_id}",); + Ok(block_data) + } else { + tracing::warn!("Claim Block not found"); + Err(ClaimBlockError::BlockDataNotFound) + } + } + + async fn claim_block_header_input_implementation( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, ClaimBlockHeaderInputError> { + let id = BlockId { + hash: block_hash.clone(), + view: Types::View::new(view_number), + }; + + tracing::info!("Received request for claiming block header input for block {id}"); + // verify the signature + if !sender.validate(signature, id.hash.as_ref()) { + tracing::error!("Signature validation failed in claim block header input"); + return Err(ClaimBlockHeaderInputError::SignatureValidationFailed); + } + let (pub_key, sign_key) = self.builder_keys.clone(); + + let extracted_block_info_option = { + // We store this write lock guard separately to make it explicit + // that this will end up holding a lock for the duration of this + // closure. + // + // Additionally, we clone the properties from the block_info that + // end up being cloned if found anyway. Since we know this already + // we can perform the clone here to avoid holding the lock for + // longer than needed. 
+ let mut global_state_write_lock_guard = self.global_state.write_arc().await; + let block_info_some = global_state_write_lock_guard.blocks.get(&id); + + block_info_some.map(|block_info| { + ( + block_info.vid_receiver.clone(), + block_info.metadata.clone(), + block_info.offered_fee, + block_info.truncated, + ) + }) + }; + + if let Some((vid_receiver, metadata, offered_fee, truncated)) = extracted_block_info_option + { + tracing::info!("Waiting for vid commitment for block {id}"); + + let timeout_after = Instant::now() + self.max_api_waiting_time; + let check_duration = self.max_api_waiting_time / 10; + + let response_received = loop { + match timeout(check_duration, vid_receiver.write().await.get()).await { + Err(_toe) => { + if Instant::now() >= timeout_after { + tracing::warn!("Couldn't get vid commitment in time for block {id}",); + { + // we can't keep up with this block size, reduce max block size + self.global_state + .write_arc() + .await + .block_size_limits + .decrement_block_size(); + } + break Err(ClaimBlockHeaderInputError::CouldNotGetVidInTime); + } + continue; + } + Ok(recv_attempt) => { + if recv_attempt.is_err() { + tracing::error!( + "Channel closed while getting vid commitment for block {id}", + ); + } + break recv_attempt + .map_err(ClaimBlockHeaderInputError::WaitAndKeepGetError); + } + } + }; + + tracing::info!("Got vid commitment for block {id}",); + + // We got VID in time with margin left. + // Maybe we can handle bigger blocks? + if timeout_after.duration_since(Instant::now()) + > self.max_api_waiting_time / VID_RESPONSE_TARGET_MARGIN_DIVISOR + { + // Increase max block size + self.global_state + .write_arc() + .await + .block_size_limits + .try_increment_block_size(truncated); + } + + match response_received { + Ok((vid_commitment, vid_precompute_data)) => { + // sign over the vid commitment + let signature_over_vid_commitment = + ::BuilderSignatureKey::sign_builder_message( + &sign_key, + vid_commitment.as_ref(), + ) + .map_err(ClaimBlockHeaderInputError::FailedToSignVidCommitment)?; + + let signature_over_fee_info = Types::BuilderSignatureKey::sign_fee( + &sign_key, + offered_fee, + &metadata, + &vid_commitment, + ) + .map_err(ClaimBlockHeaderInputError::FailedToSignFeeInfo)?; + + let response = AvailableBlockHeaderInput:: { + vid_commitment, + vid_precompute_data, + fee_signature: signature_over_fee_info, + message_signature: signature_over_vid_commitment, + sender: pub_key.clone(), + }; + tracing::info!("Sending Claim Block Header Input response for {id}",); + Ok(response) + } + Err(err) => { + tracing::warn!("Claim Block Header Input not found"); + Err(err) + } + } + } else { + tracing::warn!("Claim Block Header Input not found"); + Err(ClaimBlockHeaderInputError::BlockHeaderNotFound) + } + } +} + +/* +Handling Builder API responses +*/ +#[async_trait] +impl BuilderDataSource for ProxyGlobalState +where + for<'a> <::PureAssembledSignatureType as TryFrom< + &'a TaggedBase64, + >>::Error: Display, + for<'a> >::Error: Display, +{ + async fn available_blocks( + &self, + for_parent: &VidCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &::PureAssembledSignatureType, + ) -> Result>, BuildError> { + Ok(self + .available_blocks_implementation(for_parent, view_number, sender, signature) + .await?) 
+ } + + async fn claim_block( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + Ok(self + .claim_block_implementation(block_hash, view_number, sender, signature) + .await?) + } + + async fn claim_block_with_num_nodes( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: ::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + num_nodes: usize, + ) -> Result, BuildError> { + // Update the stored `num_nodes` with the given value, which will be used for VID computation. + self.global_state.write_arc().await.num_nodes = num_nodes; + + self.claim_block(block_hash, view_number, sender, signature) + .await + } + + async fn claim_block_header_input( + &self, + block_hash: &BuilderCommitment, + view_number: u64, + sender: Types::SignatureKey, + signature: &<::SignatureKey as SignatureKey>::PureAssembledSignatureType, + ) -> Result, BuildError> { + Ok(self + .claim_block_header_input_implementation(block_hash, view_number, sender, signature) + .await?) + } + + /// Returns the public key of the builder + async fn builder_address( + &self, + ) -> Result<::BuilderSignatureKey, BuildError> { + Ok(self.builder_keys.0.clone()) + } +} + +#[async_trait] +impl AcceptsTxnSubmits for ProxyGlobalState { + async fn submit_txns( + &self, + txns: Vec<::Transaction>, + ) -> Result::Transaction>>, BuildError> { + tracing::debug!( + "Submitting {:?} transactions to the builder states{:?}", + txns.len(), + txns.iter().map(|txn| txn.commit()).collect::>() + ); + let response = self + .global_state + .read_arc() + .await + .submit_client_txns(txns.clone()) + .await; + + let pairs: Vec<(Commitment<::Transaction>, Result<_, _>)> = (0..txns + .len()) + .map(|i| (txns[i].commit(), response[i].clone())) + .collect(); + let mut write_guard = self.global_state.write_arc().await; + for (txn_commit, res) in pairs { + if let Err(some) = res { + write_guard + .set_txn_status( + txn_commit, + TransactionStatus::Rejected { + reason: some.to_string(), + }, + ) + .await?; + } else { + write_guard + .set_txn_status(txn_commit, TransactionStatus::Pending) + .await?; + } + } + + tracing::debug!( + "Transaction submitted to the builder states, sending response: {:?}", + response + ); + + // NOTE: ideally we want to respond with original Vec + // instead of Result not to loose any information, + // but this requires changes to builder API + response.into_iter().collect() + } + + async fn txn_status( + &self, + txn_hash: Commitment<::Transaction>, + ) -> Result { + self.global_state + .read_arc() + .await + .txn_status(txn_hash) + .await + } +} +#[async_trait] +impl ReadState for ProxyGlobalState { + type State = ProxyGlobalState; + + async fn read( + &self, + op: impl Send + for<'a> FnOnce(&'a Self::State) -> BoxFuture<'a, T> + 'async_trait, + ) -> T { + op(self).await + } +} + +/* +Running Non-Permissioned Builder Service +*/ +pub async fn run_non_permissioned_standalone_builder_service< + Types: NodeType, + Ver: StaticVersionType, + S: Stream> + Unpin, +>( + // sending a DA proposal from the hotshot to the builder states + da_sender: BroadcastSender>, + + // sending a Quorum proposal from the hotshot to the builder states + quorum_sender: BroadcastSender>, + + // sending a Decide event from the hotshot to the builder states + decide_sender: BroadcastSender>, + + // HotShot event stream + hotshot_event_stream: S, + + // Global state + 
global_state: Arc>>, +) -> Result<(), anyhow::Error> { + let tx_sender = { + // This closure is likely unnecessary, but we want to play it safe + // with our RWLocks. + let global_state_read_lock_guard = global_state.read_arc().await; + global_state_read_lock_guard.tx_sender.clone() + }; + let mut hotshot_event_stream = std::pin::pin!(hotshot_event_stream); + + loop { + let Some(event) = hotshot_event_stream.next().await else { + anyhow::bail!("Event stream ended"); + }; + + match event.event { + EventType::Error { error } => { + tracing::error!("Error event in HotShot: {:?}", error); + } + // tx event + EventType::Transactions { transactions } => { + let max_block_size = { + // This closure is likely unnecessary, but we want + // to play it safe with our RWLocks. + let global_state_read_lock_guard = global_state.read_arc().await; + global_state_read_lock_guard + .block_size_limits + .max_block_size + }; + + let response = handle_received_txns( + &tx_sender, + transactions.clone(), + TransactionSource::HotShot, + max_block_size, + ) + .await; + let pairs: Vec<(Commitment<::Transaction>, Result<_, _>)> = (0 + ..transactions.len()) + .map(|i| (transactions[i].commit(), response[i].clone())) + .collect(); + let mut write_guard = global_state.write_arc().await; + for (txn_commit, res) in pairs { + if let Err(some) = res { + write_guard + .set_txn_status( + txn_commit, + TransactionStatus::Rejected { + reason: some.to_string(), + }, + ) + .await?; + } else { + write_guard + .set_txn_status(txn_commit, TransactionStatus::Pending) + .await?; + } + } + } + // decide event + EventType::Decide { + block_size: _, + leaf_chain, + qc: _, + } => { + let latest_decide_view_num = leaf_chain[0].leaf.view_number(); + handle_decide_event(&decide_sender, latest_decide_view_num).await; + } + // DA proposal event + EventType::DaProposal { proposal, sender } => { + handle_da_event(&da_sender, Arc::new(proposal), sender).await; + } + // QC proposal event + EventType::QuorumProposal { proposal, sender } => { + // get the leader for current view + handle_quorum_event(&quorum_sender, Arc::new(proposal), sender).await; + } + _ => { + tracing::debug!("Unhandled event from Builder"); + } + } + } +} + +/// [`HandleDaEventError`] represents the internal class of errors that can +/// occur when attempting to process an incoming da proposal event. More +/// specifically these are the class of error that can be returned from +/// [`handle_da_event_implementation`]. +#[derive(Debug)] +enum HandleDaEventError { + SignatureValidationFailed, + BroadcastFailed(async_broadcast::SendError>), +} + +/// [`handle_da_event`] is a utility function that will attempt to broadcast the +/// given `da_proposal` to the given `da_channel_sender` if the given details +/// pass validation checks, and the [`BroadcastSender`] `da_channel_sender` is +/// still open. +async fn handle_da_event( + da_channel_sender: &BroadcastSender>, + da_proposal: Arc>>, + sender: ::SignatureKey, +) { + // We're explicitly not inspecting this error, as this function is not + // expected to return an error or any indication of an error. + let _ = handle_da_event_implementation(da_channel_sender, da_proposal, sender).await; +} + +/// [`handle_da_event_implementation`] is a utility function that will attempt +/// to broadcast the given `da_proposal` to the given `da_channel_sender` if the +/// given details pass all relevant checks. 
+/// +/// There are only three conditions under which this will fail to send the +/// message via the given `da_channel_sender`, and they are all represented +/// via [`HandleDaEventError`]. They are as follows: +/// - [`HandleDaEventError::SignatureValidationFailed`]: The signature validation failed +/// - [`HandleDaEventError::BroadcastFailed`]: The broadcast failed as no receiver +/// is in place to receive the message +/// +/// This function is the implementation for [`handle_da_event`]. +async fn handle_da_event_implementation( + da_channel_sender: &BroadcastSender>, + da_proposal: Arc>>, + sender: ::SignatureKey, +) -> Result<(), HandleDaEventError> { + tracing::debug!( + "DaProposal: Leader: {:?} for the view: {:?}", + sender, + da_proposal.data.view_number + ); + + // get the encoded transactions hash + let encoded_txns_hash = Sha256::digest(&da_proposal.data.encoded_transactions); + // check if the sender is the leader and the signature is valid; if yes, broadcast the DA proposal + + if !sender.validate(&da_proposal.signature, &encoded_txns_hash) { + tracing::error!( + "Validation Failure on DaProposal for view {:?}: Leader: {:?}", + da_proposal.data.view_number, + sender + ); + return Err(HandleDaEventError::SignatureValidationFailed); + } + + let da_msg = DaProposalMessage:: { + proposal: da_proposal, + sender, + }; + + let view_number = da_msg.proposal.data.view_number; + tracing::debug!( + "Sending DA proposal to the builder states for view {:?}", + view_number + ); + + if let Err(e) = da_channel_sender + .broadcast(MessageType::DaProposalMessage(da_msg)) + .await + { + tracing::warn!( + "Error {e}, failed to send DA proposal to builder states for view {:?}", + view_number + ); + + return Err(HandleDaEventError::BroadcastFailed(e)); + } + + Ok(()) +} + +/// [`HandleQuorumEventError`] represents the internal class of errors that can +/// occur when attempting to process an incoming quorum proposal event. More +/// specifically these are the class of error that can be returned from +/// [`handle_quorum_event_implementation`]. +#[derive(Debug)] +enum HandleQuorumEventError { + SignatureValidationFailed, + BroadcastFailed(async_broadcast::SendError>), +} + +/// [`handle_quorum_event`] is a utility function that will attempt to broadcast the +/// given `quorum_proposal` to the given `quorum_channel_sender` if the given details +/// pass validation checks, and the [`BroadcastSender`] `quorum_channel_sender` is +/// still open. +async fn handle_quorum_event( + quorum_channel_sender: &BroadcastSender>, + quorum_proposal: Arc>>, + sender: ::SignatureKey, +) { + // We're explicitly not inspecting this error, as this function is not + // expected to return an error or any indication of an error. + let _ = + handle_quorum_event_implementation(quorum_channel_sender, quorum_proposal, sender).await; +} + +/// Utility function that will attempt to broadcast the given `quorum_proposal` +/// to the given `quorum_channel_sender` if the given details pass all relevant checks. +/// +/// There are only three conditions under which this will fail to send the +/// message via the given `quorum_channel_sender`, and they are all represented +/// via [`HandleQuorumEventError`]. They are as follows: +/// - [`HandleQuorumEventError::SignatureValidationFailed`]: The signature validation failed +/// - [`HandleQuorumEventError::BroadcastFailed`]: The broadcast failed as no receiver +/// is in place to receive the message +/// +/// This function is the implementation for [`handle_quorum_event`]. 
+async fn handle_quorum_event_implementation( + quorum_channel_sender: &BroadcastSender>, + quorum_proposal: Arc>>, + sender: ::SignatureKey, +) -> Result<(), HandleQuorumEventError> { + tracing::debug!( + "QuorumProposal: Leader: {:?} for the view: {:?}", + sender, + quorum_proposal.data.view_number + ); + + let leaf = Leaf::from_quorum_proposal(&quorum_proposal.data); + + if !sender.validate(&quorum_proposal.signature, leaf.legacy_commit().as_ref()) { + tracing::error!( + "Validation Failure on QuorumProposal for view {:?}: Leader for the current view: {:?}", + quorum_proposal.data.view_number, + sender + ); + return Err(HandleQuorumEventError::SignatureValidationFailed); + } + + let quorum_msg = QuorumProposalMessage:: { + proposal: quorum_proposal, + sender, + }; + let view_number = quorum_msg.proposal.data.view_number; + tracing::debug!( + "Sending Quorum proposal to the builder states for view {:?}", + view_number + ); + + if let Err(e) = quorum_channel_sender + .broadcast(MessageType::QuorumProposalMessage(quorum_msg)) + .await + { + tracing::warn!( + "Error {e}, failed to send Quorum proposal to builder states for view {:?}", + view_number + ); + return Err(HandleQuorumEventError::BroadcastFailed(e)); + } + + Ok(()) +} + +async fn handle_decide_event( + decide_channel_sender: &BroadcastSender>, + latest_decide_view_number: Types::View, +) { + let decide_msg: DecideMessage = DecideMessage:: { + latest_decide_view_number, + }; + tracing::debug!( + "Sending Decide event to builder states for view {:?}", + latest_decide_view_number + ); + if let Err(e) = decide_channel_sender + .broadcast(MessageType::DecideMessage(decide_msg)) + .await + { + tracing::warn!( + "Error {e}, failed to send Decide event to builder states for view {:?}", + latest_decide_view_number + ); + } +} + +#[derive(Debug)] +enum HandleReceivedTxnsError { + TransactionTooBig { + estimated_length: u64, + max_txn_len: u64, + }, + + TooManyTransactions, + + Internal(TrySendError>>), +} + +impl From> for BuildError { + fn from(error: HandleReceivedTxnsError) -> Self { + match error { + HandleReceivedTxnsError::TransactionTooBig { + estimated_length, + max_txn_len, + } => BuildError::Error(format!("Transaction too big (estimated length {estimated_length}, currently accepting <= {max_txn_len})")), + HandleReceivedTxnsError::TooManyTransactions => BuildError::Error("Too many transactions".to_owned()), + HandleReceivedTxnsError::Internal(err) => BuildError::Error(format!("Internal error when submitting transaction: {}", err)), + } + } +} + +impl From>>> + for HandleReceivedTxnsError +{ + fn from(err: TrySendError>>) -> Self { + match err { + TrySendError::Full(_) => HandleReceivedTxnsError::TooManyTransactions, + err => HandleReceivedTxnsError::Internal(err), + } + } +} + +/// Utility function that will take the given list +/// of transactions, `txns`, wraps them in a [`ReceivedTransaction`] struct +/// and attempt to broadcast them to the given transaction [`BroadcastSender`] +/// `tx_sender`. The broadcast itself it a non-blocking operation, and any +/// failures of the broadcast are collected into the returned vector +/// of [Result]s. +/// +/// There is also a `max_txn_len` parameter that is used to check to ensure +/// that transactions that exceed this threshold will also not be broadcasted. 
+pub(crate) async fn handle_received_txns( + tx_sender: &BroadcastSender>>, + txns: Vec, + source: TransactionSource, + max_txn_len: u64, +) -> Vec::Transaction>, BuildError>> { + HandleReceivedTxns::new(tx_sender.clone(), txns, source, max_txn_len) + .map(|res| res.map_err(Into::into)) + .collect() +} + +/// `HandleReceivedTxns` is a struct that is used to handle the processing of +/// the function [`handle_received_txns`]. In order to avoid the need to +/// double allocate a [Vec] from processing these entries, this struct exists +/// to be processed as an [Iterator] instead. +struct HandleReceivedTxns { + tx_sender: BroadcastSender>>, + txns: Vec, + source: TransactionSource, + max_txn_len: u64, + offset: usize, + txns_length: usize, + time_in: Instant, +} + +impl HandleReceivedTxns { + fn new( + tx_sender: BroadcastSender>>, + txns: Vec, + source: TransactionSource, + max_txn_len: u64, + ) -> Self { + let txns_length = txns.len(); + + Self { + tx_sender, + txns, + source, + max_txn_len, + offset: 0, + txns_length, + time_in: Instant::now(), + } + } +} + +impl Iterator for HandleReceivedTxns +where + Types::Transaction: Transaction, +{ + type Item = + Result::Transaction>, HandleReceivedTxnsError>; + + fn next(&mut self) -> Option { + if self.txns.is_empty() { + return None; + } + + if self.offset >= self.txns_length { + return None; + } + + let offset = self.offset; + // increment the offset so we can ensure we're making progress; + self.offset += 1; + + let tx = self.txns[offset].clone(); + let commit = tx.commit(); + // This is a rough estimate, but we don't have any other way to get real + // encoded transaction length. Luckily, this being roughly proportional + // to encoded length is enough, because we only use this value to estimate + // our limitations on computing the VID in time. 
+ let len = tx.minimum_block_size(); + let max_txn_len = self.max_txn_len; + if len > max_txn_len { + tracing::warn!(%commit, %len, %max_txn_len, "Transaction too big"); + return Some(Err(HandleReceivedTxnsError::TransactionTooBig { + estimated_length: len, + max_txn_len: self.max_txn_len, + })); + } + + let res = self + .tx_sender + .try_broadcast(Arc::new(ReceivedTransaction { + tx, + source: self.source.clone(), + commit, + time_in: self.time_in, + len, + })) + .inspect(|val| { + if let Some(evicted_txn) = val { + tracing::warn!( + "Overflow mode enabled, transaction {} evicted", + evicted_txn.commit + ); + } + }) + .map(|_| commit) + .inspect_err(|err| { + tracing::warn!("Failed to broadcast txn with commit {:?}: {}", commit, err); + }) + .map_err(HandleReceivedTxnsError::from); + + Some(res) + } + + fn size_hint(&self) -> (usize, Option) { + ( + self.txns_length - self.offset, + Some(self.txns.capacity() - self.offset), + ) + } +} + +#[cfg(test)] +mod test { + use std::{sync::Arc, time::Duration}; + + use async_lock::RwLock; + use committable::Commitment; + use committable::Committable; + use futures::StreamExt; + use hotshot::{ + traits::BlockPayload, + types::{BLSPubKey, SignatureKey}, + }; + use hotshot_builder_api::v0_1::data_source::AcceptsTxnSubmits; + use hotshot_builder_api::v0_2::block_info::AvailableBlockInfo; + use hotshot_builder_api::v0_2::builder::TransactionStatus; + use hotshot_example_types::{ + block_types::{TestBlockPayload, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, + }; + use hotshot_types::traits::block_contents::Transaction; + use hotshot_types::{ + data::{DaProposal, Leaf, QuorumProposal, ViewNumber}, + message::Proposal, + simple_certificate::QuorumCertificate, + traits::{ + block_contents::{precompute_vid_commitment, vid_commitment}, + node_implementation::ConsensusTime, + signature_key::BuilderSignatureKey, + }, + utils::BuilderCommitment, + }; + use marketplace_builder_shared::{ + block::{BlockId, BuilderStateId, ParentBlockReferences}, + testing::constants::{ + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, + TEST_NUM_NODES_IN_VID_COMPUTATION, TEST_PROTOCOL_MAX_BLOCK_SIZE, + }, + }; + use sha2::{Digest, Sha256}; + use tokio::{ + spawn, + sync::{mpsc::unbounded_channel, oneshot}, + }; + + use crate::implementation::LegacyCommit; + use crate::{ + builder_state::{ + BuildBlockInfo, MessageType, RequestMessage, ResponseMessage, TransactionSource, + TriggerStatus, + }, + service::{BlockSizeLimits, HandleReceivedTxnsError}, + testing::finalization_test::{ + process_available_blocks_round, progress_round_with_available_block_info, + progress_round_without_available_block_info, setup_builder_for_test, + }, + }; + + use super::{ + handle_da_event_implementation, handle_quorum_event_implementation, AvailableBlocksError, + BlockInfo, ClaimBlockError, ClaimBlockHeaderInputError, GlobalState, HandleDaEventError, + HandleQuorumEventError, HandleReceivedTxns, ProxyGlobalState, + }; + + /// A const number on `max_tx_len` to be used consistently spanning all the tests + /// It is set to 1 as current estimation on `TestTransaction` is 1 + const TEST_MAX_TX_LEN: u64 = 1; + + // GlobalState Tests + + // GlobalState::new Tests + + /// This test checks a [GlobalState] created from [GlobalState::new] has + /// the appropriate values stored within it. 
+ #[tokio::test] + async fn test_global_state_new() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(1), + ViewNumber::new(2), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + assert_eq!(state.blocks.len(), 0, "The blocks LRU should be empty"); + + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(1), + }; + + // There should be a single entry within the spawned_builder_states, + // and it should be the one that was just created. + assert_eq!( + state.spawned_builder_states.len(), + 1, + "There should be a single entry in the spawned builder states hashmap" + ); + + assert!(state.spawned_builder_states.contains_key(&builder_state_id), "The spawned builder states should contain an entry with the bootstrapped parameters passed into new"); + + assert!(!state.spawned_builder_states.contains_key(&BuilderStateId { parent_commitment: parent_commit, parent_view: ViewNumber::new(0) }), "The spawned builder states should not contain any other entry, as such it should not contain any entry with a higher view number, but the same parent commit"); + + // We can't compare the Senders directly + + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(2), + "The last garbage collected view number should be the one passed into new" + ); + + assert_eq!( + state.builder_state_to_last_built_block.len(), + 0, + "The builder state to last built block should be empty" + ); + + assert_eq!( + state.highest_view_num_builder_id, builder_state_id, + "The highest view number builder id should be the bootstrapped build state id" + ); + + assert_eq!( + state.block_size_limits.protocol_max_block_size, TEST_PROTOCOL_MAX_BLOCK_SIZE, + "The protocol max block size should be the one passed into new" + ); + + assert_eq!( + state.block_size_limits.max_block_size, state.block_size_limits.protocol_max_block_size, + "The max block size should be initialized to protocol max block size" + ); + } + + // GlobalState::register_builder_state Tests + + /// This test checks that the [GlobalState::register_builder_state] function + /// will correctly register a new builder state, and that the highest view + /// number builder id will be updated to the new builder state id. + /// Additionally, it will check that the spawned builder states hashmap + /// will contain the new builder state id. 
+ #[tokio::test] + async fn test_global_state_register_builder_state_different_states() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + { + let (req_sender, _) = async_broadcast::broadcast(10); + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(5), + }; + + state.register_builder_state( + builder_state_id.clone(), + ParentBlockReferences { + view_number: ViewNumber::new(5), + vid_commitment: parent_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender.clone(), + ); + + assert_eq!( + state.spawned_builder_states.len(), + 2, + "The spawned_builder_states should now have 2 elements in it" + ); + assert_eq!( + state.highest_view_num_builder_id, builder_state_id, + "The highest view number builder id should now be the one that was just registered" + ); + assert!( + state.spawned_builder_states.contains_key(&builder_state_id), + "The spawned builder states should contain the new builder state id" + ); + }; + + { + let (req_sender, _) = async_broadcast::broadcast(10); + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(6), + }; + + state.register_builder_state( + builder_state_id.clone(), + ParentBlockReferences { + view_number: ViewNumber::new(6), + vid_commitment: parent_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender.clone(), + ); + + assert_eq!( + state.spawned_builder_states.len(), + 3, + "The spawned_builder_states should now have 3 elements in it" + ); + assert_eq!( + state.highest_view_num_builder_id, builder_state_id, + "The highest view number builder id should now be the one that was just registered" + ); + assert!( + state.spawned_builder_states.contains_key(&builder_state_id), + "The spawned builder states should contain the new builder state id" + ); + }; + } + + /// This test checks that the register_builder_state method will overwrite + /// the previous sender in the `spawned_builder_states` hashmap if the same + /// `BuilderStateId` is used to register a new sender. + /// + /// It also demonstrates that doing this will drop the previous sender, + /// effectively closing it if it is the only reference to it. 
+ #[tokio::test] + async fn test_global_state_register_builder_state_same_builder_state_id() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + let mut req_receiver_1 = { + let (req_sender, req_receiver) = async_broadcast::broadcast(10); + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(5), + }; + + state.register_builder_state( + builder_state_id.clone(), + ParentBlockReferences { + view_number: ViewNumber::new(5), + vid_commitment: parent_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender.clone(), + ); + + assert_eq!( + state.spawned_builder_states.len(), + 2, + "The spawned_builder_states should now have 2 elements in it" + ); + assert_eq!( + state.highest_view_num_builder_id, builder_state_id, + "The highest view number builder id should now be the one that was just registered" + ); + + req_receiver + }; + + let mut req_receiver_2 = { + let (req_sender, req_receiver) = async_broadcast::broadcast(10); + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(5), + }; + + // This is the same BuilderStateId as the previous one, so it should + // replace the previous one. Which means that the previous one + // may no longer be published to. + state.register_builder_state( + builder_state_id.clone(), + ParentBlockReferences { + view_number: ViewNumber::new(5), + vid_commitment: parent_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender.clone(), + ); + + assert_eq!( + state.spawned_builder_states.len(), + 2, + "The spawned_builder_states should still have 2 elements in it" + ); + assert_eq!(state.highest_view_num_builder_id, builder_state_id, "The highest view number builder id should still be the one that was just registered"); + + req_receiver + }; + + { + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(5), + }; + + let req_id_and_sender = state.spawned_builder_states.get(&builder_state_id).unwrap(); + let (response_sender, _) = unbounded_channel(); + + assert!( + req_id_and_sender + .1 + .broadcast(MessageType::RequestMessage(RequestMessage { + state_id: builder_state_id, + response_channel: response_sender, + })) + .await + .is_ok(), + "This should be able to send a Message through the sender" + ); + } + + // The first receiver should have been replaced, so we won't get any + // results from it. 
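+        // Registering the second sender under the same `BuilderStateId` dropped the
+        // only remaining clone of the first sender, so the first receiver now sees a
+        // closed channel while the second receiver gets the broadcast above.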
+ + assert!( + req_receiver_1.recv().await.is_err(), + "This first receiver should be closed" + ); + assert!( + req_receiver_2.recv().await.is_ok(), + "The second receiver should receive a message" + ); + } + + /// This test checks that the register_builder_state method will only + /// update the highest_view_num_builder_id if the new [BuilderStateId] has + /// a higher view number than the current highest_view_num_builder_id. + #[tokio::test] + async fn test_global_state_register_builder_state_decrementing_builder_state_ids() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + { + let (req_sender, _) = async_broadcast::broadcast(10); + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(6), + }; + + state.register_builder_state( + builder_state_id.clone(), + ParentBlockReferences { + view_number: ViewNumber::new(6), + vid_commitment: parent_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender.clone(), + ); + + assert_eq!( + state.spawned_builder_states.len(), + 2, + "The spawned_builder_states should now have 2 elements in it" + ); + assert_eq!( + state.highest_view_num_builder_id, builder_state_id, + "The highest view number builder id should now be the one that was just registered" + ); + assert!( + state.spawned_builder_states.contains_key(&builder_state_id), + "The spawned builder states should contain the new builder state id" + ); + }; + + { + let (req_sender, _) = async_broadcast::broadcast(10); + let builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(5), + }; + + state.register_builder_state( + builder_state_id.clone(), + ParentBlockReferences { + view_number: ViewNumber::new(5), + vid_commitment: parent_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + req_sender.clone(), + ); + + assert_eq!( + state.spawned_builder_states.len(), + 3, + "The spawned_builder_states should now have 3 elements in it" + ); + assert_eq!( + state.highest_view_num_builder_id, + BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(6) + }, + "The highest view number builder id should now be the one that was just registered" + ); + assert!( + state.spawned_builder_states.contains_key(&builder_state_id), + "The spawned builder states should contain the new builder state id" + ); + }; + } + + // GlobalState::update_global_state Tests + + /// This test checks that the update_global_state method will correctly + /// update the LRU blocks cache and the builder_state_to_last_built_block + /// hashmap with values derived from the parameters passed into the method. + /// + /// The assumption behind this test is that the values being stored were + /// not being stored previously. 
+ #[tokio::test] + async fn test_global_state_update_global_state_success() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + let new_parent_commit = vid_commitment(&[], 9); + let new_view_num = ViewNumber::new(1); + let builder_state_id = BuilderStateId { + parent_commitment: new_parent_commit, + parent_view: new_view_num, + }; + + let builder_hash_1 = BuilderCommitment::from_bytes([1, 2, 3, 4]); + let block_id = BlockId { + hash: builder_hash_1, + view: new_view_num, + }; + + let (vid_trigger_sender, vid_trigger_receiver) = oneshot::channel(); + let (vid_sender, vid_receiver) = unbounded_channel(); + let (block_payload, metadata) = + >::from_transactions( + vec![TestTransaction::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])], + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + .unwrap(); + let offered_fee = 64u64; + let block_size = 64u64; + let truncated = false; + + let build_block_info = BuildBlockInfo { + id: block_id.clone(), + block_size, + offered_fee, + block_payload: block_payload.clone(), + metadata, + vid_trigger: vid_trigger_sender, + vid_receiver, + truncated, + }; + + let builder_hash_2 = BuilderCommitment::from_bytes([2, 3, 4, 5]); + let response_msg = ResponseMessage { + builder_hash: builder_hash_2.clone(), + block_size: 32, + offered_fee: 128, + }; + + // Now that every object is prepared and setup for storage, we can + // test the `update_global_state` method. + + // `update_global_state` has not return value from its method, so can + // only inspect its "success" based on the mutation of the state object. + state.update_global_state(builder_state_id.clone(), build_block_info, response_msg); + + // two things should be adjusted by `update_global_state`: + // - state.blocks + // - state.builder_state_to_last_built_block + + // start with blocks + + assert_eq!( + state.blocks.len(), + 1, + "The blocks LRU should have a single entry" + ); + + let retrieved_block_info = state.blocks.get(&block_id); + assert!( + retrieved_block_info.is_some(), + "Retrieval of the block id should result is a valid block info data" + ); + + let retrieved_block_info = retrieved_block_info.unwrap(); + + assert_eq!( + retrieved_block_info.block_payload, block_payload, + "The block payloads should match" + ); + assert_eq!( + retrieved_block_info.metadata, metadata, + "The metadata should match" + ); + assert_eq!( + retrieved_block_info.offered_fee, offered_fee, + "The offered fee should match" + ); + assert_eq!( + retrieved_block_info.truncated, truncated, + "The truncated flag should match" + ); + + { + // This ensures that the vid_trigger that is stored is still the + // same, or links to the vid_trigger_receiver that we submitted. 
+ let mut vid_trigger_write_lock_guard = + retrieved_block_info.vid_trigger.write_arc().await; + if let Some(vid_trigger_sender) = vid_trigger_write_lock_guard.take() { + vid_trigger_sender + .send(TriggerStatus::Start) + .expect("vid_trigger_sender failed"); + } + + match vid_trigger_receiver.await { + Ok(TriggerStatus::Start) => { + // This is expected + } + _ => { + panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); + } + } + } + + { + // This ensures that the vid_sender that is stored is still the + // same, or links to the vid_receiver that we submitted. + let (vid_commitment, vid_precompute) = + precompute_vid_commitment(&[1, 2, 3, 4, 5], TEST_NUM_NODES_IN_VID_COMPUTATION); + assert_eq!( + vid_sender.send((vid_commitment, vid_precompute.clone())), + Ok(()), + "The vid_sender should be able to send the vid commitment and precompute" + ); + + let mut vid_receiver_write_lock_guard = + retrieved_block_info.vid_receiver.write_arc().await; + + // Get and Keep object + + match vid_receiver_write_lock_guard.get().await { + Ok((received_vid_commitment, received_vid_precompute)) => { + assert_eq!( + received_vid_commitment, vid_commitment, + "The received vid commitment should match the expected vid commitment" + ); + assert_eq!( + received_vid_precompute, vid_precompute, + "The received vid precompute should match the expected vid precompute" + ); + } + _ => { + panic!("did not receive the expected vid commitment and precompute from vid_receiver_write_lock_guard"); + } + } + } + + // finish with builder_state_to_last_built_block + + assert_eq!( + state.builder_state_to_last_built_block.len(), + 1, + "The builder state to last built block should have a single entry" + ); + + let last_built_block = state + .builder_state_to_last_built_block + .get(&builder_state_id); + + assert!( + last_built_block.is_some(), + "The last built block should be retrievable" + ); + + let last_built_block = last_built_block.unwrap(); + + assert_eq!( + last_built_block.builder_hash, builder_hash_2, + "The last built block id should match the block id" + ); + + assert_eq!( + last_built_block.block_size, 32, + "The last built block size should match the response message" + ); + + assert_eq!( + last_built_block.offered_fee, 128, + "The last built block offered fee should match the response message" + ); + } + + /// This test demonstrates the replacement behavior of the the + /// `update_global_state` method. + /// + /// When given a `BuilderStateId` that already exists in the `blocks` LRU, + /// and the `builder_state_to_last_built_block` hashmap, the method will + /// replace the values in the `builder_state_to_last_built_block` hashmap, + /// and it will also replace the entry in the `block`s LRU. 
+ #[tokio::test] + async fn test_global_state_update_global_state_replacement() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + let new_parent_commit = vid_commitment(&[], 9); + let new_view_num = ViewNumber::new(1); + let builder_state_id = BuilderStateId { + parent_commitment: new_parent_commit, + parent_view: new_view_num, + }; + + let builder_hash = BuilderCommitment::from_bytes([1, 2, 3, 4]); + let block_id_1 = BlockId { + hash: builder_hash.clone(), + view: new_view_num, + }; + let (vid_trigger_sender_1, vid_trigger_receiver_1) = oneshot::channel(); + let (vid_sender_1, vid_receiver_1) = unbounded_channel(); + let (block_payload_1, metadata_1) = + >::from_transactions( + vec![TestTransaction::new(vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10])], + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + .unwrap(); + let offered_fee_1 = 64u64; + let block_size_1 = 64u64; + let truncated_1 = false; + let build_block_info_1 = BuildBlockInfo { + id: block_id_1.clone(), + block_size: block_size_1, + offered_fee: offered_fee_1, + block_payload: block_payload_1.clone(), + metadata: metadata_1, + vid_trigger: vid_trigger_sender_1, + vid_receiver: vid_receiver_1, + truncated: truncated_1, + }; + let response_msg_1 = ResponseMessage { + builder_hash: builder_hash.clone(), + block_size: block_size_1, + offered_fee: offered_fee_1, + }; + + // Now that every object is prepared and setup for storage, we can + // test the `update_global_state` method. + + // `update_global_state` has no return value from its method, so we can + // only inspect its "success" based on the mutation of the state object. + state.update_global_state(builder_state_id.clone(), build_block_info_1, response_msg_1); + + // We're going to enter another update_global_state_entry with the same + // builder_state_id, but with different values for the block info and + // response message. This should highlight that the values get replaced + // in this update. + + let block_id_2 = BlockId { + hash: builder_hash.clone(), + view: new_view_num, + }; + let (vid_trigger_sender_2, vid_trigger_receiver_2) = oneshot::channel(); + let (vid_sender_2, vid_receiver_2) = unbounded_channel(); + let (block_payload_2, metadata_2) = + >::from_transactions( + vec![TestTransaction::new(vec![2, 3, 4, 5, 6, 7, 8, 9, 10, 11])], + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + .unwrap(); + let offered_fee_2 = 16u64; + let block_size_2 = 32u64; + let truncated_2 = true; + let build_block_info_2 = BuildBlockInfo { + id: block_id_2.clone(), + block_size: block_size_2, + offered_fee: offered_fee_2, + block_payload: block_payload_2.clone(), + metadata: metadata_2, + vid_trigger: vid_trigger_sender_2, + vid_receiver: vid_receiver_2, + truncated: truncated_2, + }; + let response_msg_2: ResponseMessage = ResponseMessage { + builder_hash: builder_hash.clone(), + block_size: block_size_2, + offered_fee: offered_fee_2, + }; + + // two things should be adjusted by `update_global_state`: + // When given the same build_state_ids. 
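+        // As with the first insertion, those two fields are `state.blocks` (the LRU
+        // entry for the block id) and `state.builder_state_to_last_built_block` (the
+        // entry keyed by the builder state id); both should now hold the second
+        // block's data.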
+ state.update_global_state(builder_state_id.clone(), build_block_info_2, response_msg_2); + + // start with blocks + + assert_eq!( + state.blocks.len(), + 1, + "The blocks LRU should have a single entry" + ); + + let retrieved_block_info = state.blocks.get(&block_id_2); + assert!( + retrieved_block_info.is_some(), + "Retrieval of the block id should result is a valid block info data" + ); + + let retrieved_block_info = retrieved_block_info.unwrap(); + + assert_eq!( + retrieved_block_info.block_payload, block_payload_2, + "The block payloads should match" + ); + assert_ne!( + retrieved_block_info.block_payload, block_payload_1, + "The block payloads should not match" + ); + assert_eq!( + retrieved_block_info.metadata, metadata_2, + "The metadata should match" + ); + assert_eq!( + retrieved_block_info.metadata, metadata_1, + "The metadata should match" + ); + // TestMetadata will always match + + assert_eq!( + retrieved_block_info.offered_fee, offered_fee_2, + "The offered fee should match" + ); + assert_ne!( + retrieved_block_info.offered_fee, offered_fee_1, + "The offered fee should not match" + ); + assert_eq!( + retrieved_block_info.truncated, truncated_2, + "The truncated flag should match" + ); + assert_ne!( + retrieved_block_info.truncated, truncated_1, + "The truncated flag should not match" + ); + + { + // This ensures that the vid_trigger that is stored is still the + // same, or links to the vid_trigger_receiver that we submitted. + let mut vid_trigger_write_lock_guard = + retrieved_block_info.vid_trigger.write_arc().await; + if let Some(vid_trigger_sender) = vid_trigger_write_lock_guard.take() { + vid_trigger_sender + .send(TriggerStatus::Start) + .expect("vid_trigger_sender failed"); + } + + match vid_trigger_receiver_2.await { + Ok(TriggerStatus::Start) => { + // This is expected + } + _ => { + panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); + } + } + + assert!( + vid_trigger_receiver_1.await.is_err(), + "This should not receive anything from vid_trigger_receiver_1" + ); + } + + { + // This ensures that the vid_sender that is stored is still the + // same, or links to the vid_receiver that we submitted. 
+ let (vid_commitment, vid_precompute) = + precompute_vid_commitment(&[1, 2, 3, 4, 5], TEST_NUM_NODES_IN_VID_COMPUTATION); + assert_eq!( + vid_sender_2.send((vid_commitment, vid_precompute.clone())), + Ok(()), + "The vid_sender should be able to send the vid commitment and precompute" + ); + + assert!( + vid_sender_1 + .send((vid_commitment, vid_precompute.clone())) + .is_err(), + "The vid_sender should not be able to send the vid commitment and precompute" + ); + + let mut vid_receiver_write_lock_guard = + retrieved_block_info.vid_receiver.write_arc().await; + + // Get and Keep object + + match vid_receiver_write_lock_guard.get().await { + Ok((received_vid_commitment, received_vid_precompute)) => { + assert_eq!( + received_vid_commitment, vid_commitment, + "The received vid commitment should match the expected vid commitment" + ); + assert_eq!( + received_vid_precompute, vid_precompute, + "The received vid precompute should match the expected vid precompute" + ); + } + _ => { + panic!("did not receive the expected vid commitment and precompute from vid_receiver_write_lock_guard"); + } + } + } + + // finish with builder_state_to_last_built_block + + assert_eq!( + state.builder_state_to_last_built_block.len(), + 1, + "The builder state to last built block should have a single entry" + ); + + let last_built_block = state + .builder_state_to_last_built_block + .get(&builder_state_id); + + assert!( + last_built_block.is_some(), + "The last built block should be retrievable" + ); + + let last_built_block = last_built_block.unwrap(); + + assert_eq!( + last_built_block.builder_hash, builder_hash, + "The last built block id should match the block id" + ); + + assert_eq!( + last_built_block.block_size, block_size_2, + "The last built block size should match the response message" + ); + assert_ne!( + last_built_block.block_size, block_size_1, + "The last built block size should not match the previous block size" + ); + + assert_eq!( + last_built_block.offered_fee, offered_fee_2, + "The last built block offered fee should match the response message" + ); + assert_ne!( + last_built_block.offered_fee, offered_fee_1, + "The last built block offered fee should not match the previous block offered fee" + ); + } + + // GlobalState::remove_handles Tests + + /// This test checks to ensure that remove_handles will only consider + /// views up to what is known to have been stored. As a result it will + /// indicate that is has only targeted to the highest view number that it + /// is aware of. + #[tokio::test] + async fn test_global_state_remove_handles_prune_up_to_latest() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + // We register a few builder states. 
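+        // Views 1..=10, each keyed by a VID commitment over the single byte `[i]`.
+        // Together with the bootstrap state created by `new`, this yields the 11
+        // spawned builder states asserted below.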
+ for i in 1..=10 { + let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); + let view = ViewNumber::new(i as u64); + + state.register_builder_state( + BuilderStateId { + parent_commitment: vid_commit, + parent_view: view, + }, + ParentBlockReferences { + view_number: view, + vid_commitment: vid_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + async_broadcast::broadcast(10).0, + ); + } + + assert_eq!( + state.spawned_builder_states.len(), + 11, + "The spawned_builder_states should have the expected number of entries", + ); + + assert_eq!( + state.remove_handles(ViewNumber::new(100)), + ViewNumber::new(10), + "It should only be able to prune up to what has been stored" + ); + + assert_eq!( + state.spawned_builder_states.len(), + 1, + "The spawned_builder_states should only have a single entry in it" + ); + + let builder_state_id = BuilderStateId { + parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(10), + }; + assert_eq!( + state.highest_view_num_builder_id, builder_state_id, + "The highest view number builder id should be the one that was just registered" + ); + + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(9), + "The last garbage collected view number should match expected value" + ); + + assert!( + state.spawned_builder_states.contains_key(&BuilderStateId { + parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(10), + }), + "The spawned builder states should contain the builder state id: {builder_state_id}" + ); + } + + /// This test checks that the remove_handles doesn't ensure that the + /// `last_garbage_collected_view_num` is strictly increasing. By first + /// removing a higher view number, followed by a smaller view number + /// (with the highest_view_num_builder_id having a view greater than or + /// equal to both targets) we can demonstrate this property. + /// + /// Furthermore this demonstrates that by supplying any view number to + /// remove_handles that is less than `last_garbage_collected_view_num` will + /// result in `last_garbage_collected_view_num` being updated to the given + /// value minus 1, without regard for it actually removing / cleaning + /// anything, or whether it is moving backwards in view numbers. + /// + /// If we were to account for the view numbers actually being cleaned up, + /// we could still trigger this behavior be re-adding the builder states + /// with a view number that precedes the last garbage collected view number, + /// then removing them would trigger the same behavior. + #[tokio::test] + async fn test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_simple() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + // We register a few builder states. 
+ for i in 1..=10 { + let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); + let view = ViewNumber::new(i as u64); + + state.register_builder_state( + BuilderStateId { + parent_commitment: vid_commit, + parent_view: view, + }, + ParentBlockReferences { + view_number: view, + vid_commitment: vid_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + async_broadcast::broadcast(10).0, + ); + } + + assert_eq!( + state.highest_view_num_builder_id, + BuilderStateId { + parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(10), + }, + "The highest view number builder id should be the one that was just registered" + ); + + assert_eq!( + state.remove_handles(ViewNumber::new(10)), + ViewNumber::new(10), + "It should remove what has been stored" + ); + + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(9), + "The last garbage collected view number should match expected value" + ); + + assert_eq!( + state.remove_handles(ViewNumber::new(5)), + ViewNumber::new(5), + "If we only remove up to view 5, then only entries preceding view 5 should be removed" + ); + + // The last garbage collected view has gone down as a result of our + // new remove_handles target, demonstrating that this number isn't + // strictly increasing in value. + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(4), + "The last garbage collected view number should match expected value", + ); + } + + /// This test checks that the remove_handles doesn't ensure that the + /// `last_garbage_collected_view_num` is strictly increasing. It is very + /// similar to `test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_simple` + /// but differs in that it re-adds the removed builder states, just in case + /// the previous test's behavior is erroneous and fixed by ensuring that we + /// only consider removed view numbers. + #[tokio::test] + async fn test_global_state_remove_handles_can_reduce_last_garbage_collected_view_num_strict() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + // We register a few builder states. 
+ for i in 1..=10 { + let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); + let view = ViewNumber::new(i as u64); + + state.register_builder_state( + BuilderStateId { + parent_commitment: vid_commit, + parent_view: view, + }, + ParentBlockReferences { + view_number: view, + vid_commitment: vid_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + async_broadcast::broadcast(10).0, + ); + } + + assert_eq!( + state.highest_view_num_builder_id, + BuilderStateId { + parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(10), + }, + "The highest view number builder id should be the one that was just registered" + ); + + assert_eq!( + state.remove_handles(ViewNumber::new(10)), + ViewNumber::new(10), + "It should remove what has been stored" + ); + + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(9), + "The last garbage collected view number should match expected value" + ); + + // We re-add these removed builder_state_ids + for i in 1..10 { + let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); + let view = ViewNumber::new(i as u64); + + state.register_builder_state( + BuilderStateId { + parent_commitment: vid_commit, + parent_view: view, + }, + ParentBlockReferences { + view_number: view, + vid_commitment: vid_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + async_broadcast::broadcast(10).0, + ); + } + + assert_eq!( + state.remove_handles(ViewNumber::new(5)), + ViewNumber::new(5), + "If we only remove up to view 5, then only entries preceding view 5 should be removed" + ); + + // The last garbage collected view has gone down as a result of our + // new remove_handles target, demonstrating that this number isn't + // strictly increasing in value. + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(4), + "The last garbage collected view number should match expected value", + ); + } + + /// This test checks that the remove_handles methods will correctly remove + /// The expected number of builder states from the spawned_builder_states + /// hashmap. It does this by specifically controlling the number of builder + /// states that are registered, and then removing a subset of them. It + /// verifies the absence of the entries that should have been removed, and + /// the presence of the entries that should have been kept. + #[tokio::test] + async fn test_global_state_remove_handles_expected() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let parent_commit = vid_commitment(&[0], TEST_NUM_NODES_IN_VID_COMPUTATION); + let mut state = GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ); + + // We register a few builder states. 
+ for i in 1..=10 { + let vid_commit = vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION); + let view = ViewNumber::new(i as u64); + + state.register_builder_state( + BuilderStateId { + parent_commitment: vid_commit, + parent_view: view, + }, + ParentBlockReferences { + view_number: view, + vid_commitment: vid_commit, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + async_broadcast::broadcast(10).0, + ); + } + + assert_eq!( + state.spawned_builder_states.len(), + 11, + "The spawned_builder_states should have 11 elements in it" + ); + + assert_eq!( + state.highest_view_num_builder_id, + BuilderStateId { + parent_commitment: vid_commitment(&[10], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(10), + }, + "The highest view number builder id should be the one that was just registered" + ); + + assert_eq!( + state.last_garbage_collected_view_num, + ViewNumber::new(0), + "The last garbage collected view number should be hat was passed in" + ); + + // Now we want to clean up some previous builder states to ensure that we + // remove the appropriate targets. + + // This should remove the view builder states preceding the view number 5 + assert_eq!( + state.remove_handles(ViewNumber::new(5)), + ViewNumber::new(5), + "The last garbage collected view number should match expected value" + ); + + // There should be 11 - 5 entries remaining + assert_eq!( + state.spawned_builder_states.len(), + 6, + "The spawned_builder_states should have 6 elements in it" + ); + + for i in 0..5 { + let builder_state_id = BuilderStateId { + parent_commitment: vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(i as u64), + }; + assert!( + !state.spawned_builder_states.contains_key(&builder_state_id), + "the spawned builder states should contain the builder state id, {builder_state_id}" + ); + } + + for i in 5..=10 { + let builder_state_id = BuilderStateId { + parent_commitment: vid_commitment(&[i], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::new(i as u64), + }; + assert!( + state.spawned_builder_states.contains_key(&builder_state_id), + "The spawned builder states should contain the builder state id: {builder_state_id}" + ); + } + } + + // Get Available Blocks Tests + + /// This test checks that the error `AvailableBlocksError::NoBlocksAvailable` + /// is returned when no blocks are available. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of get_available_blocks, and we do not provide any + /// information for the block view number requested. As a result, the + /// implementation will ultimately timeout, and return an error that + /// indicates that no blocks were available. 
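+    /// The timeout in question is the wait duration handed to `ProxyGlobalState::new`
+    /// (100ms in this test), so the request gives up quickly.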
+ #[tokio::test] + async fn test_get_available_blocks_error_no_blocks_available() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key), + Duration::from_millis(100), + ); + + // leader_private_key + let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap(); + + // This *should* just time out + let result = state + .available_blocks_implementation( + &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION), + 1, + leader_public_key, + &signature, + ) + .await; + + match result { + Err(AvailableBlocksError::NoBlocksAvailable) => { + // This is what we expect. + // This message *should* indicate that no blocks were available. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + /// This test checks that the error `AvailableBlocksError::SignatureValidationFailed` + /// is returned when the signature is invalid. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of get_available_blocks, but we sign the request with + /// the builder's private key instead of the leader's private key. Since + /// these keys do not match, this will result in a signature verification + /// error. + #[tokio::test] + async fn test_get_available_blocks_error_invalid_signature() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, _leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_millis(100), + ); + + // leader_private_key + let signature = BLSPubKey::sign(&builder_private_key, parent_commit.as_ref()).unwrap(); + + // This *should* just time out + let result = state + .available_blocks_implementation( + &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION), + 1, + leader_public_key, + &signature, + ) + .await; + + match result { + Err(AvailableBlocksError::SignatureValidationFailed) => { + // This is what we expect. + // This message *should* indicate that the signature passed + // did not match the given public key. 
+                }
+                Err(err) => {
+                    panic!("Unexpected error: {:?}", err);
+                }
+                Ok(_) => {
+                    panic!("Expected an error, but got a result");
+                }
+            }
+        }
+
+    /// This test checks that the error `AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided`
+    /// is returned when the requested view number has already been garbage
+    /// collected.
+    ///
+    /// To trigger this condition, we initialize the GlobalState with a
+    /// garbage collected view number that is higher than the view that will
+    /// be requested.
+    #[tokio::test]
+    async fn test_get_available_blocks_error_requesting_previous_view_number() {
+        let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+        let (tx_sender, _) = async_broadcast::broadcast(10);
+        let (builder_public_key, builder_private_key) =
+            <BLSPubKey as BuilderSignatureKey>::generated_from_seed_indexed([0; 32], 0);
+        let (leader_public_key, leader_private_key) =
+            <BLSPubKey as BuilderSignatureKey>::generated_from_seed_indexed([0; 32], 1);
+        let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+
+        let state = ProxyGlobalState::<TestTypes>::new(
+            Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+                bootstrap_sender,
+                tx_sender,
+                parent_commit,
+                ViewNumber::new(0),
+                ViewNumber::new(2),
+                TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+                TEST_PROTOCOL_MAX_BLOCK_SIZE,
+                TEST_NUM_NODES_IN_VID_COMPUTATION,
+                TEST_MAX_TX_NUM,
+            ))),
+            (builder_public_key, builder_private_key),
+            Duration::from_millis(100),
+        );
+
+        // leader_private_key
+        let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap();
+
+        // This should fail because view 1 precedes the last garbage collected view (2)
+        let result = state
+            .available_blocks_implementation(
+                &vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION),
+                1,
+                leader_public_key,
+                &signature,
+            )
+            .await;
+
+        match result {
+            Err(AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided) => {
+                // This is what we expect.
+                // This error *should* indicate that the requested view has
+                // already been garbage collected / decided.
+            }
+            Err(err) => {
+                panic!("Unexpected error: {:?}", err);
+            }
+            Ok(_) => {
+                panic!("Expected an error, but got a result");
+            }
+        }
+    }
+
+    /// This test checks that the error `AvailableBlocksError::GetChannelForMatchingBuilderError`
+    /// is returned when attempting to retrieve a view that is not stored within the state, and
+    /// the highest view is also no longer stored within the state.
+    ///
+    /// To trigger this condition, we initialize the GlobalState with an initial
+    /// state, and then we mutate the state to record the wrong latest state id.
+    /// When interacting with `GlobalState` only via `register_builder_state` and
+    /// `remove_handles`, this error does not appear to be immediately reachable.
+ #[tokio::test] + async fn test_get_available_blocks_error_get_channel_for_matching_builder() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(4), + ViewNumber::new(4), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + { + let mut write_locked_global_state = state.global_state.write_arc().await; + write_locked_global_state.highest_view_num_builder_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(5), + }; + } + + // As a result, we **should** be receiving a request for the available + // blocks with our expected state id on the receiver, along with a channel + // to send the response back to the caller. + + let signature = BLSPubKey::sign(&leader_private_key, parent_commit.as_ref()).unwrap(); + let result = state + .available_blocks_implementation(&parent_commit, 6, leader_public_key, &signature) + .await; + match result { + Err(AvailableBlocksError::GetChannelForMatchingBuilderError(_)) => { + // This is what we expect. + // This message *should* indicate that the response channel was closed. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + // We have two error cases for `available_blocks_implementation` that we + // cannot seem trigger directly due to the nature of how the implementation + // performs. + // + // The first is ChannelUnexpectedlyClosed, which doesn't seem to be + // producible as the unbounded channel doesn't seem to be able to be + // closed. + // + // The second is SigningBlockFailed, which doesn't seem to be producible + // with a valid private key, and it's not clear how to create an invalid + // private key. + + /// This test checks that call to `available_blocks_implementation` returns + /// a successful response when the function is called before blocks are + /// made available. 
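+    /// The request is spawned first; the matching builder state is then registered
+    /// and a `ResponseMessage` is sent over the response channel, at which point the
+    /// spawned request should resolve with the corresponding `AvailableBlockInfo`.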
+ #[tokio::test] + async fn test_get_available_blocks_requested_before_blocks_available() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let cloned_parent_commit = parent_commit; + let cloned_state = state.clone(); + let cloned_leader_private_key = leader_private_key.clone(); + + // We want to trigger a request for the available blocks, before we make the available block available + let get_available_blocks_handle = spawn(async move { + // leader_private_key + let signature = + BLSPubKey::sign(&cloned_leader_private_key, cloned_parent_commit.as_ref()).unwrap(); + cloned_state + .available_blocks_implementation( + &cloned_parent_commit, + 1, + leader_public_key, + &signature, + ) + .await + }); + + // Now we want to make the block data available to the state. + let expected_builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(1), + }; + + let mut response_receiver = { + // We only want to keep this write lock for the time needed, and + // no more. + let mut write_locked_global_state = state.global_state.write_arc().await; + + // We insert a sender so that the next time this stateId is requested, + // it will be available to send data back. + let (response_sender, response_receiver) = async_broadcast::broadcast(10); + write_locked_global_state.register_builder_state( + expected_builder_state_id.clone(), + ParentBlockReferences { + view_number: expected_builder_state_id.parent_view, + vid_commitment: expected_builder_state_id.parent_commitment, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + response_sender, + ); + + response_receiver + }; + + // As a result, we **should** be receiving a request for the available + // blocks with our expected state id on the receiver, along with a channel + // to send the response back to the caller. 
+ + let response_channel = match response_receiver.next().await { + None => { + panic!("Expected a request for available blocks, but didn't get one"); + } + Some(MessageType::RequestMessage(req_msg)) => { + assert_eq!(req_msg.state_id, expected_builder_state_id); + req_msg.response_channel + } + Some(message) => { + panic!( + "Expected a request for available blocks, but got a different message: {:?}", + message + ); + } + }; + + // We want to send a ResponseMessage to the channel + let expected_response = ResponseMessage { + block_size: 9, + offered_fee: 7, + builder_hash: BuilderCommitment::from_bytes([1, 2, 3, 4, 5]), + }; + + assert!( + response_channel.send(expected_response.clone()).is_ok(), + "failed to send ResponseMessage" + ); + + let result = get_available_blocks_handle + .await + .expect("get_available_blocks_handle failed"); + match result { + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(result) => { + assert_eq!( + result, + vec![AvailableBlockInfo { + block_hash: expected_response.builder_hash.clone(), + block_size: expected_response.block_size, + offered_fee: expected_response.offered_fee, + signature: ::sign_block_info( + &builder_private_key, + expected_response.block_size, + expected_response.offered_fee, + &expected_response.builder_hash, + ) + .unwrap(), + sender: builder_public_key, + _phantom: Default::default(), + }], + "get_available_blocks response matches expectation" + ); + } + } + } + + /// This test checks that call to `available_blocks_implementation` returns + /// a successful response when the function is called after blocks are + /// made available. + #[tokio::test] + async fn test_get_available_blocks_requested_after_blocks_available() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let cloned_parent_commit = parent_commit; + let cloned_state = state.clone(); + let cloned_leader_private_key = leader_private_key.clone(); + + // Now we want to make the block data available to the state. + let expected_builder_state_id = BuilderStateId { + parent_commitment: parent_commit, + parent_view: ViewNumber::new(1), + }; + + let mut response_receiver = { + // We only want to keep this write lock for the time needed, and + // no more. + let mut write_locked_global_state = state.global_state.write_arc().await; + + // We insert a sender so that the next time this stateId is requested, + // it will be available to send data back. 
+ let (response_sender, response_receiver) = async_broadcast::broadcast(10); + write_locked_global_state.register_builder_state( + expected_builder_state_id.clone(), + ParentBlockReferences { + view_number: expected_builder_state_id.parent_view, + vid_commitment: expected_builder_state_id.parent_commitment, + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + response_sender, + ); + + response_receiver + }; + + // We want to trigger a request for the available blocks, before we make the available block available + let get_available_blocks_handle = spawn(async move { + // leader_private_key + let signature = + BLSPubKey::sign(&cloned_leader_private_key, cloned_parent_commit.as_ref()).unwrap(); + cloned_state + .available_blocks_implementation( + &cloned_parent_commit, + 1, + leader_public_key, + &signature, + ) + .await + }); + + // As a result, we **should** be receiving a request for the available + // blocks with our expected state id on the receiver, along with a channel + // to send the response back to the caller. + + let response_channel = match response_receiver.next().await { + None => { + panic!("Expected a request for available blocks, but didn't get one"); + } + Some(MessageType::RequestMessage(req_msg)) => { + assert_eq!(req_msg.state_id, expected_builder_state_id); + req_msg.response_channel + } + Some(message) => { + panic!( + "Expected a request for available blocks, but got a different message: {:?}", + message + ); + } + }; + + // We want to send a ResponseMessage to the channel + let expected_response = ResponseMessage { + block_size: 9, + offered_fee: 7, + builder_hash: BuilderCommitment::from_bytes([1, 2, 3, 4, 5]), + }; + + assert!( + response_channel.send(expected_response.clone()).is_ok(), + "failed to send ResponseMessage" + ); + + let result = get_available_blocks_handle + .await + .expect("get_available_blocks_handle failed"); + match result { + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(result) => { + assert_eq!( + result, + vec![AvailableBlockInfo { + block_hash: expected_response.builder_hash.clone(), + block_size: expected_response.block_size, + offered_fee: expected_response.offered_fee, + signature: ::sign_block_info( + &builder_private_key, + expected_response.block_size, + expected_response.offered_fee, + &expected_response.builder_hash, + ) + .unwrap(), + sender: builder_public_key, + _phantom: Default::default(), + }], + "get_available_blocks response matches expectation" + ); + } + } + } + + // Claim Block Tests + + /// This test checks that the error `ClaimBlockError::SignatureValidationFailed` + /// is returned when the signature is invalid. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of claim_block, but we sign the request with + /// the builder's private key instead of the leader's private key. Since + /// these keys do not match, this will result in a signature verification + /// error. 
+ #[tokio::test] + async fn test_claim_block_error_signature_validation_failed() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, _leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + + let signature = BLSPubKey::sign(&builder_private_key, commitment.as_ref()).unwrap(); + let result = state + .claim_block_implementation(&commitment, 1, leader_public_key, &signature) + .await; + + match result { + Err(ClaimBlockError::SignatureValidationFailed) => { + // This is what we expect. + // This message *should* indicate that the signature passed + // did not match the given public key. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + /// This test checks that the error `ClaimBlockError::BlockDataNotFound` + /// is returned when the block data is not found. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of claim_block, but we do not provide any information + /// for the block data requested. As a result, the implementation will + /// ultimately timeout, and return an error that indicates that the block + /// data was not found. + #[tokio::test] + async fn test_claim_block_error_block_data_not_found() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + + let signature = BLSPubKey::sign(&leader_private_key, commitment.as_ref()).unwrap(); + let result = state + .claim_block_implementation(&commitment, 1, leader_public_key, &signature) + .await; + + match result { + Err(ClaimBlockError::BlockDataNotFound) => { + // This is what we expect. + // This message *should* indicate that the signature passed + // did not match the given public key. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + /// This test checks that the function completes successfully. 
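+    /// It does so by inserting a `BlockInfo` for the claimed block id directly into
+    /// the blocks LRU, with a `vid_trigger` channel under the test's control, and
+    /// then checking that `claim_block_implementation` fires `TriggerStatus::Start`
+    /// and resolves without error.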
+ #[tokio::test] + async fn test_claim_block_success() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + let cloned_commitment = commitment.clone(); + let cloned_state = state.clone(); + + let vid_trigger_receiver = { + let mut global_state_write_lock = state.global_state.write_arc().await; + let block_id = BlockId { + hash: commitment, + view: ViewNumber::new(1), + }; + + let payload = TestBlockPayload { + transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])], + }; + + let (vid_trigger_sender, vid_trigger_receiver) = oneshot::channel(); + let (_, vid_receiver) = unbounded_channel(); + + global_state_write_lock.blocks.put( + block_id, + BlockInfo { + block_payload: payload, + metadata: TestMetadata { + num_transactions: 1, + }, + vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))), + vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait( + vid_receiver, + ))), + offered_fee: 100, + truncated: false, + }, + ); + + vid_trigger_receiver + }; + + let claim_block_join_handle = spawn(async move { + let signature = + BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap(); + cloned_state + .claim_block_implementation(&cloned_commitment, 1, leader_public_key, &signature) + .await + }); + + // This should be the started event + match vid_trigger_receiver.await { + Ok(TriggerStatus::Start) => { + // This is what we expect. + } + _ => { + panic!("Expected a TriggerStatus::Start event"); + } + } + + let result = claim_block_join_handle.await; + + match result { + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + // This is expected + } + } + } + + // Claim Block Header Input Tests + + /// This test checks that the error `ClaimBlockHeaderInputError::SignatureValidationFailed` + /// is returned when the signature is invalid. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of claim_block, but we sign the request with + /// the builder's private key instead of the leader's private key. Since + /// these keys do not match, this will result in a signature verification + /// error. 
+ #[tokio::test] + async fn test_claim_block_header_input_error_signature_verification_failed() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, _leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + + let signature = BLSPubKey::sign(&builder_private_key, commitment.as_ref()).unwrap(); + + let result = state + .claim_block_header_input_implementation(&commitment, 1, leader_public_key, &signature) + .await; + + match result { + Err(ClaimBlockHeaderInputError::SignatureValidationFailed) => { + // This is what we expect. + // This message *should* indicate that the signature passed + // did not match the given public key. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + /// This test checks that the error `ClaimBlockHeaderInputError::BlockHeaderNotFound` + /// is returned when the block header is not found. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of claim_block, but we do not provide any information + /// for the block header requested. As a result, the implementation will + /// ultimately timeout, and return an error that indicates that the block + /// header was not found. + #[tokio::test] + async fn test_claim_block_header_input_error_block_header_not_found() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + + let signature = BLSPubKey::sign(&leader_private_key, commitment.as_ref()).unwrap(); + + let result = state + .claim_block_header_input_implementation(&commitment, 1, leader_public_key, &signature) + .await; + + match result { + Err(ClaimBlockHeaderInputError::BlockHeaderNotFound) => { + // This is what we expect. + // This message *should* indicate that the signature passed + // did not match the given public key. 
+ } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + /// This test checks that the error `ClaimBlockHeaderInputError::CouldNotGetVidInTime` + /// is returned when the VID is not received in time. + /// + /// To trigger this condition, we simply submit a request to the + /// implementation of claim_block, but we do not provide a VID. As a result, + /// the implementation will ultimately timeout, and return an error that + /// indicates that the VID was not received in time. + /// + /// At least that's what it should do. At the moment, this results in a + /// deadlock due to attempting to acquire the `write_arc` twice. + #[tokio::test] + async fn test_claim_block_header_input_error_could_not_get_vid_in_time() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + let cloned_commitment = commitment.clone(); + let cloned_state = state.clone(); + + let _vid_sender = { + let mut global_state_write_lock = state.global_state.write_arc().await; + let block_id = BlockId { + hash: commitment, + view: ViewNumber::new(1), + }; + + let payload = TestBlockPayload { + transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])], + }; + + let (vid_trigger_sender, _) = oneshot::channel(); + let (vid_sender, vid_receiver) = unbounded_channel(); + + global_state_write_lock.blocks.put( + block_id, + BlockInfo { + block_payload: payload, + metadata: TestMetadata { + num_transactions: 1, + }, + vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))), + vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait( + vid_receiver, + ))), + offered_fee: 100, + truncated: false, + }, + ); + + vid_sender + }; + + let claim_block_header_input_join_handle = spawn(async move { + let signature = + BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap(); + cloned_state + .claim_block_header_input_implementation( + &cloned_commitment, + 1, + leader_public_key, + &signature, + ) + .await + }); + + let result = claim_block_header_input_join_handle + .await + .expect("join error"); + + match result { + Err(ClaimBlockHeaderInputError::CouldNotGetVidInTime) => { + // This is what we expect. + // This message *should* indicate that the signature passed + // did not match the given public key. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + } + } + + /// This test checks that the error `ClaimBlockHeaderInputError::WaitAndKeepGetError` + /// is returned when the VID is not received in time. 
+    ///
+    /// To trigger this condition, we simply submit a request to the
+    /// implementation of claim_block_header_input, but we close the VID
+    /// receiver channel's sender.
+    #[tokio::test]
+    async fn test_claim_block_header_input_error_keep_and_wait_get_error() {
+        let (bootstrap_sender, _) = async_broadcast::broadcast(10);
+        let (tx_sender, _) = async_broadcast::broadcast(10);
+        let (builder_public_key, builder_private_key) =
+            <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 0);
+        let (leader_public_key, leader_private_key) =
+            <BLSPubKey as SignatureKey>::generated_from_seed_indexed([0; 32], 1);
+        let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION);
+
+        let state = Arc::new(ProxyGlobalState::<TestTypes>::new(
+            Arc::new(RwLock::new(GlobalState::<TestTypes>::new(
+                bootstrap_sender,
+                tx_sender,
+                parent_commit,
+                ViewNumber::new(0),
+                ViewNumber::new(0),
+                TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD,
+                TEST_PROTOCOL_MAX_BLOCK_SIZE,
+                TEST_NUM_NODES_IN_VID_COMPUTATION,
+                TEST_MAX_TX_NUM,
+            ))),
+            (builder_public_key, builder_private_key.clone()),
+            Duration::from_secs(1),
+        ));
+
+        let commitment = BuilderCommitment::from_bytes([0; 256]);
+        let cloned_commitment = commitment.clone();
+        let cloned_state = state.clone();
+
+        {
+            let mut global_state_write_lock = state.global_state.write_arc().await;
+            let block_id = BlockId {
+                hash: commitment,
+                view: ViewNumber::new(1),
+            };
+
+            let payload = TestBlockPayload {
+                transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])],
+            };
+
+            let (vid_trigger_sender, _) = oneshot::channel();
+            let (_, vid_receiver) = unbounded_channel();
+
+            global_state_write_lock.blocks.put(
+                block_id,
+                BlockInfo {
+                    block_payload: payload,
+                    metadata: TestMetadata {
+                        num_transactions: 1,
+                    },
+                    vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))),
+                    vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait(
+                        vid_receiver,
+                    ))),
+                    offered_fee: 100,
+                    truncated: false,
+                },
+            );
+        };
+
+        let claim_block_header_input_join_handle = spawn(async move {
+            let signature =
+                BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap();
+            cloned_state
+                .claim_block_header_input_implementation(
+                    &cloned_commitment,
+                    1,
+                    leader_public_key,
+                    &signature,
+                )
+                .await
+        });
+
+        let result = claim_block_header_input_join_handle
+            .await
+            .expect("join error");
+
+        match result {
+            Err(ClaimBlockHeaderInputError::WaitAndKeepGetError(_)) => {
+                // This is what we expect.
+                // This error *should* indicate that the sending half of the VID
+                // receiver channel was dropped before a result could be retrieved.
+            }
+            Err(err) => {
+                panic!("Unexpected error: {:?}", err);
+            }
+            Ok(_) => {
+                panic!("Expected an error, but got a result");
+            }
+        }
+    }
+
+    /// This test checks that a successful response is returned when the VID is
+    /// received in time.
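+    ///
+    /// The difference from the timeout and channel-error cases above is that the
+    /// VID result is actually delivered while the claim is in flight, roughly
+    /// (illustrative only, mirroring the test body below):
+    /// ```ignore
+    /// let handle = spawn(async move { /* claim_block_header_input_implementation(...) */ });
+    /// // Deliver the precomputed VID result so the claim can complete.
+    /// vid_sender.send(precompute_vid_commitment(&[1, 2, 3, 4], 2)).unwrap();
+    /// assert!(handle.await.unwrap().is_ok());
+    /// ```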
+ #[tokio::test] + async fn test_claim_block_header_input_success() { + let (bootstrap_sender, _) = async_broadcast::broadcast(10); + let (tx_sender, _) = async_broadcast::broadcast(10); + let (builder_public_key, builder_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (leader_public_key, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let parent_commit = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + + let state = Arc::new(ProxyGlobalState::::new( + Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender, + parent_commit, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))), + (builder_public_key, builder_private_key.clone()), + Duration::from_secs(1), + )); + + let commitment = BuilderCommitment::from_bytes([0; 256]); + let cloned_commitment = commitment.clone(); + let cloned_state = state.clone(); + + let vid_sender = { + let mut global_state_write_lock = state.global_state.write_arc().await; + let block_id = BlockId { + hash: commitment, + view: ViewNumber::new(1), + }; + + let payload = TestBlockPayload { + transactions: vec![TestTransaction::new(vec![1, 2, 3, 4])], + }; + + let (vid_trigger_sender, _) = oneshot::channel(); + let (vid_sender, vid_receiver) = unbounded_channel(); + + global_state_write_lock.blocks.put( + block_id, + BlockInfo { + block_payload: payload, + metadata: TestMetadata { + num_transactions: 1, + }, + vid_trigger: Arc::new(async_lock::RwLock::new(Some(vid_trigger_sender))), + vid_receiver: Arc::new(async_lock::RwLock::new(crate::WaitAndKeep::Wait( + vid_receiver, + ))), + offered_fee: 100, + truncated: false, + }, + ); + + vid_sender + }; + + let claim_block_header_input_join_handle = spawn(async move { + let signature = + BLSPubKey::sign(&leader_private_key, cloned_commitment.as_ref()).unwrap(); + cloned_state + .claim_block_header_input_implementation( + &cloned_commitment, + 1, + leader_public_key, + &signature, + ) + .await + }); + + vid_sender + .send(precompute_vid_commitment(&[1, 2, 3, 4], 2)) + .unwrap(); + + let result = claim_block_header_input_join_handle.await; + + match result { + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + Ok(_) => { + // This is expected. + } + } + } + + // handle_da_event Tests + + /// This test checks that the error [HandleDaEventError::SignatureValidationFailed] + /// is returned under the right conditions of invoking + /// [handle_da_event_implementation]. + /// + /// To trigger this error, we simply need to ensure that signature provided + /// to the [Proposal] does not match the public key of the sender. + /// Additionally, the public keys passed for both the leader and the sender + /// need to match each other. 
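+    ///
+    /// The check being exercised is, roughly, that the proposal signature over the
+    /// Sha256 digest of the encoded transactions must verify against the sender's
+    /// public key (illustrative only, mirroring the test body below):
+    /// ```ignore
+    /// let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions);
+    /// // Signed with the *leader* key, so validation against `sender_public_key` fails.
+    /// let signature = BLSPubKey::sign(&leader_private_key, &encoded_txns_hash).unwrap();
+    /// assert!(!sender_public_key.validate(&signature, &encoded_txns_hash));
+    /// ```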
+ #[tokio::test] + async fn test_handle_da_event_implementation_error_signature_validation_failed() { + let (sender_public_key, _) = + ::generated_from_seed_indexed([0; 32], 0); + let (_, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let (da_channel_sender, _) = async_broadcast::broadcast(10); + let view_number = ViewNumber::new(10); + + let da_proposal = DaProposal:: { + encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]), + metadata: TestMetadata { + num_transactions: 1, + }, // arbitrary + view_number, + }; + + let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions); + let signature = + ::sign(&leader_private_key, &encoded_txns_hash).unwrap(); + + let signed_da_proposal = Arc::new(Proposal { + data: da_proposal, + signature, + _pd: Default::default(), + }); + + let result = handle_da_event_implementation( + &da_channel_sender, + signed_da_proposal.clone(), + sender_public_key, + ) + .await; + + match result { + Err(HandleDaEventError::SignatureValidationFailed) => { + // This is expected. + } + Ok(_) => { + panic!("expected an error, but received a successful attempt instead") + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + } + + /// This test checks that the error [HandleDaEventError::BroadcastFailed] + /// is returned under the right conditions of invoking + /// [handle_da_event_implementation]. + /// + /// To trigger this error, we simply need to ensure that the broadcast + /// channel receiver has been closed / dropped before the attempt to + /// send on the broadcast sender is performed. + #[tokio::test] + async fn test_handle_da_event_implementation_error_broadcast_failed() { + let (sender_public_key, sender_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let da_channel_sender = { + let (da_channel_sender, _) = async_broadcast::broadcast(10); + da_channel_sender + }; + + let view_number = ViewNumber::new(10); + + let da_proposal = DaProposal:: { + encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]), + metadata: TestMetadata { + num_transactions: 1, + }, // arbitrary + view_number, + }; + + let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions); + let signature = + ::sign(&sender_private_key, &encoded_txns_hash).unwrap(); + + let signed_da_proposal = Arc::new(Proposal { + data: da_proposal, + signature, + _pd: Default::default(), + }); + + let result = handle_da_event_implementation( + &da_channel_sender, + signed_da_proposal.clone(), + sender_public_key, + ) + .await; + + match result { + Err(HandleDaEventError::BroadcastFailed(_)) => { + // This error is expected + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + } + + /// This test checks the expected successful behavior of the + /// [handle_da_event_implementation] function. 
+ #[tokio::test] + async fn test_handle_da_event_implementation_success() { + let (sender_public_key, sender_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (da_channel_sender, da_channel_receiver) = async_broadcast::broadcast(10); + let view_number = ViewNumber::new(10); + + let da_proposal = DaProposal:: { + encoded_transactions: Arc::new([1, 2, 3, 4, 5, 6]), + metadata: TestMetadata { + num_transactions: 1, + }, // arbitrary + view_number, + }; + + let encoded_txns_hash = Sha256::digest(&da_proposal.encoded_transactions); + let signature = + ::sign(&sender_private_key, &encoded_txns_hash).unwrap(); + + let signed_da_proposal = Arc::new(Proposal { + data: da_proposal, + signature, + _pd: Default::default(), + }); + + let result = handle_da_event_implementation( + &da_channel_sender, + signed_da_proposal.clone(), + sender_public_key, + ) + .await; + + match result { + Ok(_) => { + // This is expected. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + + let mut da_channel_receiver = da_channel_receiver; + match da_channel_receiver.next().await { + Some(MessageType::DaProposalMessage(da_proposal_message)) => { + assert_eq!(da_proposal_message.proposal, signed_da_proposal); + } + _ => { + panic!("Expected a DaProposalMessage, but got something else"); + } + } + } + + // handle_quorum_event Tests + + /// This test checks that the error [HandleQuorumEventError::SignatureValidationFailed] + /// is returned under the right conditions of invoking + /// [handle_quorum_event_implementation]. + /// + /// To trigger this error, we simply need to ensure that the signature + /// provided to the [Proposal] does not match the public key of the sender. + /// + /// Additionally, the public keys passed for both the leader and the sender + /// need to match each other. + #[tokio::test] + async fn test_handle_quorum_event_error_signature_validation_failed() { + let (sender_public_key, _) = + ::generated_from_seed_indexed([0; 32], 0); + let (_, leader_private_key) = + ::generated_from_seed_indexed([0; 32], 1); + let (quorum_channel_sender, _) = async_broadcast::broadcast(10); + let view_number = ViewNumber::new(10); + + let quorum_proposal = { + let leaf = Leaf::::genesis( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await; + + QuorumProposal:: { + block_header: leaf.block_header().clone(), + view_number, + justify_qc: QuorumCertificate::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, + upgrade_certificate: None, + proposal_certificate: None, + } + }; + + let leaf = Leaf::from_quorum_proposal(&quorum_proposal); + + let signature = + ::sign(&leader_private_key, leaf.legacy_commit().as_ref()) + .unwrap(); + + let signed_quorum_proposal = Arc::new(Proposal { + data: quorum_proposal, + signature, + _pd: Default::default(), + }); + + let result = handle_quorum_event_implementation( + &quorum_channel_sender, + signed_quorum_proposal.clone(), + sender_public_key, + ) + .await; + + match result { + Err(HandleQuorumEventError::SignatureValidationFailed) => { + // This is expected. + } + Ok(_) => { + panic!("expected an error, but received a successful attempt instead"); + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + } + + /// This test checks that the error [HandleQuorumEventError::BroadcastFailed] + /// is returned under the right conditions of invoking + /// [handle_quorum_event_implementation]. 
+ /// + /// To trigger this error, we simply need to ensure that the broadcast + /// channel receiver has been closed / dropped before the attempt to + /// send on the broadcast sender is performed. + #[tokio::test] + async fn test_handle_quorum_event_error_broadcast_failed() { + let (sender_public_key, sender_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let quorum_channel_sender = { + let (quorum_channel_sender, _) = async_broadcast::broadcast(10); + quorum_channel_sender + }; + + let view_number = ViewNumber::new(10); + + let quorum_proposal = { + let leaf = Leaf::::genesis( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await; + + QuorumProposal:: { + block_header: leaf.block_header().clone(), + view_number, + justify_qc: QuorumCertificate::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, + upgrade_certificate: None, + proposal_certificate: None, + } + }; + + let leaf = Leaf::from_quorum_proposal(&quorum_proposal); + + let signature = + ::sign(&sender_private_key, leaf.legacy_commit().as_ref()) + .unwrap(); + + let signed_quorum_proposal = Arc::new(Proposal { + data: quorum_proposal, + signature, + _pd: Default::default(), + }); + + let result = handle_quorum_event_implementation( + &quorum_channel_sender, + signed_quorum_proposal.clone(), + sender_public_key, + ) + .await; + + match result { + Err(HandleQuorumEventError::BroadcastFailed(_)) => { + // This is expected. + } + Ok(_) => { + panic!("Expected an error, but got a result"); + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + } + + /// This test checks to ensure that [handle_quorum_event_implementation] + /// completes successfully as expected when the correct conditions are met. + #[tokio::test] + async fn test_handle_quorum_event_success() { + let (sender_public_key, sender_private_key) = + ::generated_from_seed_indexed([0; 32], 0); + let (quorum_channel_sender, quorum_channel_receiver) = async_broadcast::broadcast(10); + let view_number = ViewNumber::new(10); + + let quorum_proposal = { + let leaf = Leaf::::genesis( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await; + + QuorumProposal:: { + block_header: leaf.block_header().clone(), + view_number, + justify_qc: QuorumCertificate::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, + upgrade_certificate: None, + proposal_certificate: None, + } + }; + + let leaf = Leaf::from_quorum_proposal(&quorum_proposal); + + let signature = + ::sign(&sender_private_key, leaf.legacy_commit().as_ref()) + .unwrap(); + + let signed_quorum_proposal = Arc::new(Proposal { + data: quorum_proposal, + signature, + _pd: Default::default(), + }); + + let result = handle_quorum_event_implementation( + &quorum_channel_sender, + signed_quorum_proposal.clone(), + sender_public_key, + ) + .await; + + match result { + Ok(_) => { + // This is expected. + } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + + let mut quorum_channel_receiver = quorum_channel_receiver; + match quorum_channel_receiver.next().await { + Some(MessageType::QuorumProposalMessage(da_proposal_message)) => { + assert_eq!(da_proposal_message.proposal, signed_quorum_proposal); + } + _ => { + panic!("Expected a QuorumProposalMessage, but got something else"); + } + } + } + + // HandleReceivedTxns Tests + + /// This test checks that the error [HandleReceivedTxnsError::TooManyTransactions] + /// is returned when the conditions are met. 
+ /// + /// To trigger this error we simply provide a broadcast channel with a + /// buffer smaller than the number of transactions we are attempting to + /// send through it. + #[tokio::test] + async fn test_handle_received_txns_error_too_many_transactions() { + let (tx_sender, tx_receiver) = async_broadcast::broadcast(2); + let num_transactions = 5; + let mut txns = Vec::with_capacity(num_transactions); + for index in 0..num_transactions { + txns.push(TestTransaction::new(vec![index as u8])); + } + let txns = txns; + + { + let mut handle_received_txns_iter = HandleReceivedTxns::::new( + tx_sender, + txns.clone(), + TransactionSource::HotShot, + TEST_MAX_TX_LEN, + ); + + assert!(handle_received_txns_iter.next().is_some()); + assert!(handle_received_txns_iter.next().is_some()); + match handle_received_txns_iter.next() { + Some(Err(HandleReceivedTxnsError::TooManyTransactions)) => { + // This is expected, + } + Some(Err(err)) => { + panic!("Unexpected error: {:?}", err); + } + Some(Ok(_)) => { + panic!("Expected an error, but got a result"); + } + None => { + panic!("Expected an error, but got a result"); + } + } + } + + let mut tx_receiver = tx_receiver; + assert!(tx_receiver.next().await.is_some()); + assert!(tx_receiver.next().await.is_some()); + assert!(tx_receiver.next().await.is_none()); + } + + /// This test checks that the error [HandleReceivedTxnsError::TransactionTooBig] + /// when the conditions are met. + /// + /// To trigger this error we simply provide a [TestTransaction] whose size + /// exceeds the maximum transaction length. we pass to [HandleReceivedTxns]. + #[tokio::test] + async fn test_handle_received_txns_error_transaction_too_big() { + let (tx_sender, tx_receiver) = async_broadcast::broadcast(10); + let num_transactions = 2; + let mut txns = Vec::with_capacity(num_transactions + 1); + for index in 0..num_transactions { + txns.push(TestTransaction::new(vec![index as u8])); + } + txns.push(TestTransaction::new(vec![0; 256])); + let txns = txns; + + { + let mut handle_received_txns_iter = HandleReceivedTxns::::new( + tx_sender, + txns.clone(), + TransactionSource::HotShot, + TEST_MAX_TX_LEN, + ); + + assert!(handle_received_txns_iter.next().is_some()); + assert!(handle_received_txns_iter.next().is_some()); + match handle_received_txns_iter.next() { + Some(Err(HandleReceivedTxnsError::TransactionTooBig { + estimated_length, + max_txn_len, + })) => { + // This is expected, + assert!(estimated_length >= 256); + assert_eq!(max_txn_len, TEST_MAX_TX_LEN); + } + Some(Err(err)) => { + panic!("Unexpected error: {:?}", err); + } + Some(Ok(_)) => { + panic!("Expected an error, but got a result"); + } + None => { + panic!("Expected an error, but got a result"); + } + } + } + + let mut tx_receiver = tx_receiver; + assert!(tx_receiver.next().await.is_some()); + assert!(tx_receiver.next().await.is_some()); + assert!(tx_receiver.next().await.is_none()); + } + + /// This test checks that the error [HandleReceivedTxnsError::Internal] + /// is returned when the broadcast channel is closed. + /// + /// To trigger this error we simply close the broadcast channel receiver + /// before attempting to send any transactions through the broadcast channel + /// sender. 
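+    ///
+    /// This relies on `async_broadcast` channel semantics: once every receiver has
+    /// been dropped the channel is closed, and subsequent sends fail immediately.
+    /// A minimal sketch of that behavior (illustrative only, not a doc-test):
+    /// ```ignore
+    /// let (sender, receiver) = async_broadcast::broadcast::<u8>(10);
+    /// drop(receiver); // no receivers left, so the channel closes
+    /// assert!(matches!(
+    ///     sender.try_broadcast(0),
+    ///     Err(async_broadcast::TrySendError::Closed(_))
+    /// ));
+    /// ```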
+ #[tokio::test] + async fn test_handle_received_txns_error_internal() { + let tx_sender = { + let (tx_sender, _) = async_broadcast::broadcast(10); + tx_sender + }; + + let num_transactions = 10; + let mut txns = Vec::with_capacity(num_transactions); + for index in 0..num_transactions { + txns.push(TestTransaction::new(vec![index as u8])); + } + txns.push(TestTransaction::new(vec![0; 256])); + let txns = txns; + + { + let mut handle_received_txns_iter = HandleReceivedTxns::::new( + tx_sender, + txns.clone(), + TransactionSource::HotShot, + TEST_MAX_TX_LEN, + ); + + match handle_received_txns_iter.next() { + Some(Err(HandleReceivedTxnsError::Internal(err))) => { + // This is expected, + + match err { + async_broadcast::TrySendError::Closed(_) => { + // This is expected. + } + _ => { + panic!("Unexpected error: {:?}", err); + } + } + } + Some(Err(err)) => { + panic!("Unexpected error: {:?}", err); + } + Some(Ok(_)) => { + panic!("Expected an error, but got a result"); + } + None => { + panic!("Expected an error, but got a result"); + } + } + } + } + + /// This test checks that [HandleReceivedTxns] processes completely without + /// issue when the conditions are correct for it to do so. + #[tokio::test] + async fn test_handle_received_txns_success() { + let (tx_sender, tx_receiver) = async_broadcast::broadcast(10); + let num_transactions = 10; + let mut txns = Vec::with_capacity(num_transactions); + for index in 0..num_transactions { + txns.push(TestTransaction::new(vec![index as u8])); + } + let txns = txns; + + let handle_received_txns_iter = HandleReceivedTxns::::new( + tx_sender, + txns.clone(), + TransactionSource::HotShot, + TEST_MAX_TX_LEN, + ); + + for iteration in handle_received_txns_iter { + match iteration { + Ok(_) => { + // This is expected. 
+ } + Err(err) => { + panic!("Unexpected error: {:?}", err); + } + } + } + + let mut tx_receiver = tx_receiver; + for tx in txns { + match tx_receiver.next().await { + Some(received_txn) => { + assert_eq!(received_txn.tx, tx); + } + _ => { + panic!("Expected a TransactionMessage, but got something else"); + } + } + } + } + + /// This test checks builder does save the status of transactions correctly + #[tokio::test] + async fn test_get_txn_status() { + let (proxy_global_state, _, da_proposal_sender, quorum_proposal_sender, _) = + setup_builder_for_test(); + tracing::debug!("start tests on correctly setting transaction status."); + + let mut round = 0; + let mut current_builder_state_id = BuilderStateId:: { + parent_commitment: vid_commitment(&[], 8), + parent_view: ViewNumber::genesis(), + }; + current_builder_state_id = progress_round_without_available_block_info( + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + + // round 1: test status Pending + let num_transactions = 10; + let mut txns = Vec::with_capacity(num_transactions); + for index in 0..num_transactions { + txns.push(TestTransaction::new(vec![index as u8])); + } + let txns = txns; + proxy_global_state + .submit_txns(txns.clone()) + .await + .expect("should submit transaction without issue"); + // advance the round + { + round = 1; + let (_attempts, available_available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + current_builder_state_id = progress_round_with_available_block_info( + &proxy_global_state, + available_available_blocks_result.unwrap()[0].clone(), + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } + // tx submitted in round 1 should be pending + for tx in txns.clone() { + match proxy_global_state.txn_status(tx.commit()).await { + Ok(txn_status) => { + assert_eq!(txn_status, TransactionStatus::Pending); + } + e => { + panic!("transaction status should be Pending instead of {:?}", e); + } + } + } + + // round 2: test status Pending again + let mut txns_2 = Vec::with_capacity(num_transactions); + for index in 0..num_transactions { + txns_2.push(TestTransaction::new(vec![(num_transactions + index) as u8])); + } + let txns_2 = txns_2; + proxy_global_state + .submit_txns(txns_2.clone()) + .await + .expect("should submit transaction without issue"); + // advance the round + { + round = 2; + let (_attempts, available_available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + progress_round_with_available_block_info( + &proxy_global_state, + available_available_blocks_result.unwrap()[0].clone(), + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } + // tx submitted in round 2 should be pending + for tx in txns_2.clone() { + match proxy_global_state.txn_status(tx.commit()).await { + Ok(txn_status) => { + assert_eq!(txn_status, TransactionStatus::Pending); + } + e => { + panic!("transaction status should be Pending instead of {:?}", e); + } + } + } + + // round 3: test status Rejected with correct error message + let big_txns = vec![TestTransaction::new(vec![ + 0; + TEST_PROTOCOL_MAX_BLOCK_SIZE + as usize + + 1 + ])]; + let _ = proxy_global_state.submit_txns(big_txns.clone()).await; + for tx in big_txns.clone() { + match proxy_global_state.txn_status(tx.commit()).await { + Ok(txn_status) => { + if 
tx.minimum_block_size() > TEST_PROTOCOL_MAX_BLOCK_SIZE { + tracing::debug!( + "In test_get_txn_status(), txn_status of large tx = {:?}", + txn_status + ); + matches!(txn_status, TransactionStatus::Rejected { .. }); + if let TransactionStatus::Rejected { reason } = txn_status { + assert!(reason.contains("Transaction too big")); + } + } else { + assert_eq!(txn_status, TransactionStatus::Pending); + } + } + e => { + panic!( + "transaction status should be a valid status instead of {:?}", + e + ); + } + } + } + + { + // Test a rejected txn marked as other status again + let mut write_guard = proxy_global_state.global_state.write_arc().await; + for tx in big_txns { + match write_guard + .set_txn_status(tx.commit(), TransactionStatus::Pending) + .await + { + Err(err) => { + panic!("Expected a result, but got a error {:?}", err); + } + _ => { + // This is expected + } + } + + match write_guard.txn_status(tx.commit()).await { + Ok(txn_status) => { + assert_eq!(txn_status, TransactionStatus::Pending); + } + e => { + panic!( + "transaction status should be a valid status instead of {:?}", + e + ); + } + } + } + } + + { + // Test a sequenced txn cannot be marked as other status again + let mut write_guard = proxy_global_state.global_state.write_arc().await; + let tx_test_assigned_twice = + TestTransaction::new(vec![(num_transactions * 3 + 1) as u8]); + write_guard + .set_txn_status( + tx_test_assigned_twice.commit(), + TransactionStatus::Sequenced { leaf: 0 }, + ) + .await + .unwrap(); + match write_guard + .set_txn_status(tx_test_assigned_twice.commit(), TransactionStatus::Pending) + .await + { + Err(_err) => { + // This is expected + } + _ => { + panic!("Expected an error, but got a result"); + } + } + } + + { + // Test status Unknown when the txn is unknown + let unknown_tx = TestTransaction::new(vec![(num_transactions * 4 + 1) as u8]); + match proxy_global_state.txn_status(unknown_tx.commit()).await { + Ok(txn_status) => { + assert_eq!(txn_status, TransactionStatus::Unknown); + } + e => { + panic!("transaction status should be Unknown instead of {:?}", e); + } + } + } + } + + #[test] + fn test_increment_block_size() { + let mut block_size_limits = + BlockSizeLimits::new(TEST_PROTOCOL_MAX_BLOCK_SIZE, Duration::from_millis(25)); + // Simulate decreased limits + block_size_limits.max_block_size = TEST_PROTOCOL_MAX_BLOCK_SIZE / 2; + + // Shouldn't increment, increment period hasn't passed yet + block_size_limits.try_increment_block_size(false); + assert!(block_size_limits.max_block_size == TEST_PROTOCOL_MAX_BLOCK_SIZE / 2); + + // Should increment, increment period hasn't passed yet, but force flag is set + block_size_limits.try_increment_block_size(true); + assert!(block_size_limits.max_block_size > TEST_PROTOCOL_MAX_BLOCK_SIZE / 2); + let new_size = block_size_limits.max_block_size; + + std::thread::sleep(Duration::from_millis(30)); + + // Should increment, increment period has passed + block_size_limits.try_increment_block_size(false); + assert!(block_size_limits.max_block_size > new_size); + } + + #[test] + fn test_decrement_block_size() { + let mut block_size_limits = BlockSizeLimits::new( + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + ); + block_size_limits.decrement_block_size(); + assert!(block_size_limits.max_block_size < TEST_PROTOCOL_MAX_BLOCK_SIZE); + } + + #[test] + fn test_max_block_size_floor() { + let mut block_size_limits = BlockSizeLimits::new( + BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR + 1, + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + ); + 
block_size_limits.decrement_block_size(); + assert_eq!( + block_size_limits.max_block_size, + BlockSizeLimits::MAX_BLOCK_SIZE_FLOOR + ); + } +} diff --git a/crates/legacy/src/old/testing/basic_test.rs b/crates/legacy/src/old/testing/basic_test.rs new file mode 100644 index 00000000..57e716cb --- /dev/null +++ b/crates/legacy/src/old/testing/basic_test.rs @@ -0,0 +1,516 @@ +pub use hotshot::traits::election::static_committee::StaticCommittee; +pub use hotshot_types::{ + data::{DaProposal, EpochNumber, Leaf, QuorumProposal, ViewNumber}, + message::Proposal, + signature_key::BLSPubKey, + simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + traits::{ + block_contents::BlockPayload, + node_implementation::{ConsensusTime, NodeType}, + }, +}; + +pub use crate::builder_state::{BuilderState, MessageType}; +pub use async_broadcast::broadcast; +/// The following tests are performed: +#[cfg(test)] +mod tests { + use super::*; + use std::collections::VecDeque; + use std::{hash::Hash, marker::PhantomData}; + + use hotshot::types::SignatureKey; + use hotshot_builder_api::v0_2::data_source::BuilderDataSource; + use hotshot_example_types::auction_results_provider_types::TestAuctionResult; + use hotshot_example_types::node_types::TestVersions; + use hotshot_types::{ + signature_key::BuilderKey, + simple_vote::QuorumData, + traits::block_contents::{vid_commitment, BlockHeader}, + utils::BuilderCommitment, + }; + + use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + state_types::{TestInstanceState, TestValidatedState}, + }; + use marketplace_builder_shared::block::ParentBlockReferences; + use marketplace_builder_shared::testing::constants::{ + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + }; + use tokio::time::error::Elapsed; + use tokio::time::timeout; + use tracing_subscriber::EnvFilter; + + use crate::builder_state::{ + DaProposalMessage, DecideMessage, QuorumProposalMessage, TransactionSource, + }; + use crate::implementation::LegacyCommit; + use crate::service::{ + handle_received_txns, GlobalState, ProxyGlobalState, ReceivedTransaction, + }; + use async_lock::RwLock; + use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use sha2::{Digest, Sha256}; + use std::sync::Arc; + use std::time::Duration; + + use serde::{Deserialize, Serialize}; + /// This test simulates multiple builder states receiving messages from the channels and processing them + #[tokio::test] + //#[instrument] + async fn test_builder() { + // Setup logging + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + tracing::info!("Testing the builder core with multiple messages from the channels"); + #[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, + )] + struct TestTypes; + impl NodeType for TestTypes { + type View = ViewNumber; + type Epoch = EpochNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = StaticCommittee; + type BuilderSignatureKey = BuilderKey; + type AuctionResult = TestAuctionResult; + } + // no of test messages to send + let num_test_messages = 5; + let multiplication_factor = 5; + const NUM_NODES_IN_VID_COMPUTATION: 
usize = 4; + + // settingup the broadcast channels i.e [From hostshot: (tx, decide, da, quorum, )], [From api:(req - broadcast, res - mpsc channel) ] + let (decide_sender, decide_receiver) = + broadcast::>(num_test_messages * multiplication_factor); + let (da_sender, da_receiver) = + broadcast::>(num_test_messages * multiplication_factor); + let (quorum_sender, quorum_proposal_receiver) = + broadcast::>(num_test_messages * multiplication_factor); + let (bootstrap_sender, bootstrap_receiver) = + broadcast::>(num_test_messages * multiplication_factor); + let (tx_sender, tx_receiver) = broadcast::>>( + num_test_messages * multiplication_factor, + ); + let tx_queue = VecDeque::new(); + // generate the keys for the buidler + let seed = [201_u8; 32]; + let (builder_pub_key, builder_private_key) = + BLSPubKey::generated_from_seed_indexed(seed, 2011_u64); + // instantiate the global state also + let initial_commitment = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let global_state = Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender.clone(), + initial_commitment, + ViewNumber::new(0), + ViewNumber::new(0), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))); + + let bootstrap_builder_state = BuilderState::new( + ParentBlockReferences { + view_number: ViewNumber::new(0), + vid_commitment: initial_commitment, + leaf_commit: Commitment::>::default_commitment_no_preimage(), + builder_commitment: BuilderCommitment::from_bytes([]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + decide_receiver.clone(), + da_receiver.clone(), + quorum_proposal_receiver.clone(), + bootstrap_receiver, + tx_receiver, + tx_queue, + global_state.clone(), + Duration::from_millis(100), + 1, + Arc::new(TestInstanceState::default()), + Duration::from_millis(100), + Arc::new(TestValidatedState::default()), + ); + + // Kick off async look for the bootstrap builder state + bootstrap_builder_state.event_loop(); + + let proxy_global_state = ProxyGlobalState::new( + global_state.clone(), + (builder_pub_key, builder_private_key), + Duration::from_millis(100), + ); + + // to store all the sent messages + // storing response messages + let mut previous_commitment = initial_commitment; + let mut previous_view = ViewNumber::new(0); + let mut previous_quorum_proposal = { + let previous_jc = QuorumCertificate::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await; + + QuorumProposal:: { + block_header: TestBlockHeader { + block_number: 0, + payload_commitment: previous_commitment, + builder_commitment: BuilderCommitment::from_bytes([]), + timestamp: 0, + metadata: TestMetadata { + num_transactions: 0, + }, + random: 1, // arbitrary + }, + view_number: ViewNumber::new(0), + justify_qc: previous_jc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + } + }; + + // generate num_test messages for each type and send it to the respective channels; + for round in 0..num_test_messages as u32 { + // Submit Transactions to the Builder + { + // Prepare the transaction message + let tx = TestTransaction::new(vec![round as u8]); + + let tx_vec = vec![tx]; + assert_eq!( + handle_received_txns( + &tx_sender, + tx_vec.clone(), + TransactionSource::HotShot, + u64::MAX, + ) + .await + .into_iter() + .map(|res| res.unwrap()) + .collect::>(), + tx_vec.iter().map(|tx| tx.commit()).collect::>(), + "handle_received_txns should have the commits for all 
transactions submitted", + ); + } + + // Request available blocks from the builder + { + let (leader_pub, leader_priv) = + BLSPubKey::generated_from_seed_indexed(seed, round as u64); + + let commitment_signature = + ::sign(&leader_priv, previous_commitment.as_ref()) + .unwrap(); + + let available_blocks = proxy_global_state + .available_blocks( + &previous_commitment, + previous_view.u64(), + leader_pub, + &commitment_signature, + ) + .await + .unwrap(); + + // The available blocks should **NOT** be empty + assert!( + available_blocks.len() == 1, + "available blocks should return a single entry" + ); + assert!( + available_blocks[0].offered_fee >= 1, + "offered fee should be greater than 1" + ); + + let block_hash = available_blocks[0].block_hash.clone(); + + // Let's claim this block, and this block header + let block_hash_signature = + ::sign(&leader_priv, block_hash.as_ref()).unwrap(); + + let claimed_block = proxy_global_state + .claim_block_with_num_nodes( + &block_hash, + previous_view.u64(), + leader_pub, + &block_hash_signature, + // Increment to test whether `num_nodes` is updated properly. + TEST_NUM_NODES_IN_VID_COMPUTATION + 1, + ) + .await + .unwrap(); + + let _claimed_block_header = proxy_global_state + .claim_block_header_input( + &block_hash, + previous_view.u64(), + leader_pub, + &block_hash_signature, + ) + .await + .unwrap(); + + // Create the proposals from the transactions contained within + // the claim_block result. + + let proposed_transactions = claimed_block.block_payload.transactions.clone(); + assert_eq!( + proposed_transactions.len(), + 1, + "there should be one transaction in the proposed block" + ); + + let encoded_transactions = TestTransaction::encode(&proposed_transactions); + + // Prepare the DA proposal message + let da_proposal_message = { + let da_proposal = DaProposal { + encoded_transactions: encoded_transactions.clone().into(), + metadata: TestMetadata { + num_transactions: encoded_transactions.len() as u64, + }, + view_number: ViewNumber::new(round as u64), + }; + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let seed = [round as u8; 32]; + let (pub_key, private_key) = + BLSPubKey::generated_from_seed_indexed(seed, round as u64); + let da_signature = + ::SignatureKey::sign( + &private_key, + &encoded_transactions_hash, + ) + .expect("Failed to sign encoded tx hash while preparing da proposal"); + + DaProposalMessage:: { + proposal: Arc::new(Proposal { + data: da_proposal, + signature: da_signature.clone(), + _pd: PhantomData, + }), + sender: pub_key, + } + }; + + // Prepare the Quorum proposal message + // calculate the vid commitment over the encoded_transactions + let quorum_certificate_message = { + let block_payload = claimed_block.block_payload.clone(); + let metadata = claimed_block.metadata; + + tracing::debug!( + "Encoded transactions: {:?} Num nodes:{}", + encoded_transactions, + NUM_NODES_IN_VID_COMPUTATION + ); + + let block_payload_commitment = + vid_commitment(&encoded_transactions, NUM_NODES_IN_VID_COMPUTATION); + + tracing::debug!( + "Block Payload vid commitment: {:?}", + block_payload_commitment + ); + + let builder_commitment = + >::builder_commitment( + &block_payload, + &metadata, + ); + + let block_header = TestBlockHeader { + block_number: round as u64, + payload_commitment: block_payload_commitment, + builder_commitment, + timestamp: round as u64, + metadata, + random: 1, // arbitrary + }; + + let justify_qc = { + let previous_justify_qc = previous_quorum_proposal.justify_qc.clone(); + // metadata + 
let _metadata = >::metadata( + &previous_quorum_proposal.block_header, + ); + let leaf = Leaf::from_quorum_proposal(&previous_quorum_proposal); + + let q_data = QuorumData:: { + leaf_commit: leaf.legacy_commit(), + }; + + let previous_quorum_view_number = + previous_quorum_proposal.view_number.u64(); + let view_number = if previous_quorum_view_number == 0 + && previous_justify_qc.view_number.u64() == 0 + { + ViewNumber::new(0) + } else { + ViewNumber::new(1 + previous_justify_qc.view_number.u64()) + }; + // form a justify qc + SimpleCertificate::, SuccessThreshold>::new( + q_data.clone(), + q_data.commit(), + view_number, + previous_justify_qc.signatures.clone(), + PhantomData, + ) + }; + + tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc); + + let quorum_proposal = QuorumProposal:: { + block_header, + view_number: ViewNumber::new(round as u64), + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let payload_vid_commitment = + >::payload_commitment( + &quorum_proposal.block_header, + ); + + let quorum_signature = ::sign( + &leader_priv, + payload_vid_commitment.as_ref(), + ) + .expect("Failed to sign payload commitment while preparing Quorum proposal"); + + QuorumProposalMessage:: { + proposal: Arc::new(Proposal { + data: quorum_proposal.clone(), + signature: quorum_signature, + _pd: PhantomData, + }), + sender: leader_pub, + } + }; + + // Prepare the Decide Message + // The Decide is mainly for cleanup of old BuilderStates. + // This may not be necessary for this test + let decide_message = { + let leaf = match round { + 0 => { + Leaf::genesis( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } + _ => { + let block_payload = BlockPayload::::from_bytes( + &encoded_transactions, + >::metadata( + &quorum_certificate_message.proposal.data.block_header, + ), + ); + let mut current_leaf = Leaf::from_quorum_proposal( + &quorum_certificate_message.proposal.data, + ); + current_leaf + .fill_block_payload(block_payload, NUM_NODES_IN_VID_COMPUTATION) + .unwrap(); + current_leaf + } + }; + + DecideMessage:: { + latest_decide_view_number: leaf.view_number(), + } + }; + + // Increment the view and the previous commitment + previous_commitment = quorum_certificate_message + .proposal + .data + .block_header + .payload_commitment; + previous_view = quorum_certificate_message.proposal.data.view_number; + previous_quorum_proposal = + quorum_certificate_message.proposal.as_ref().data.clone(); + + // Broadcast the DA proposal + da_sender + .broadcast(MessageType::DaProposalMessage(da_proposal_message)) + .await + .unwrap(); + + // Broadcast the Quorum Certificate + quorum_sender + .broadcast(MessageType::QuorumProposalMessage( + quorum_certificate_message, + )) + .await + .unwrap(); + + // Send the Decide Message + decide_sender + .broadcast(MessageType::DecideMessage(decide_message)) + .await + .unwrap(); + } + } + + // We cloned these receivers to ensure that progress was being made + // by the Builder processes. Using these broadcast receivers we can + // verify the number of messages received in this entire process, as + // well as the order of them, should we be so inclined. 
+ + // There should be num_test_messages messages in the receivers + let mut da_receiver = da_receiver; + let mut quorum_proposal_receiver = quorum_proposal_receiver; + let mut decide_receiver = decide_receiver; + for _ in 0..num_test_messages { + da_receiver.recv().await.unwrap(); + quorum_proposal_receiver.recv().await.unwrap(); + decide_receiver.recv().await.unwrap(); + } + + // There should not be any other messages to receive + let Err(Elapsed { .. }) = timeout(Duration::from_millis(100), da_receiver.recv()).await + else { + panic!("There should not be any more messages in the da_receiver"); + }; + let Err(Elapsed { .. }) = + timeout(Duration::from_millis(100), quorum_proposal_receiver.recv()).await + else { + panic!("There should not be any more messages in the da_receiver"); + }; + let Err(Elapsed { .. }) = timeout(Duration::from_millis(100), decide_receiver.recv()).await + else { + panic!("There should not be any more messages in the da_receiver"); + }; + + // Verify `num_nodes`. + assert_eq!( + global_state.read_arc().await.num_nodes, + TEST_NUM_NODES_IN_VID_COMPUTATION + 1 + ); + } +} diff --git a/crates/legacy/src/old/testing/finalization_test.rs b/crates/legacy/src/old/testing/finalization_test.rs new file mode 100644 index 00000000..b683dfa5 --- /dev/null +++ b/crates/legacy/src/old/testing/finalization_test.rs @@ -0,0 +1,558 @@ +use std::{sync::Arc, time::Duration}; + +use super::basic_test::{BuilderState, MessageType}; +use crate::{ + builder_state::{DaProposalMessage, QuorumProposalMessage, ALLOW_EMPTY_BLOCK_PERIOD}, + service::{GlobalState, ProxyGlobalState, ReceivedTransaction}, +}; +use async_broadcast::{broadcast, Sender}; +use async_lock::RwLock; +use committable::Commitment; +use hotshot::{ + traits::BlockPayload, + types::{BLSPubKey, SignatureKey}, +}; +use hotshot_builder_api::{ + v0_2::{block_info::AvailableBlockInfo, data_source::BuilderDataSource}, + v0_3::{builder::BuildError, data_source::AcceptsTxnSubmits}, +}; +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; +use hotshot_types::{ + data::{DaProposal, QuorumProposal, ViewNumber}, + message::Proposal, + simple_certificate::QuorumCertificate, + traits::{ + block_contents::{vid_commitment, BlockHeader}, + node_implementation::ConsensusTime, + }, + utils::BuilderCommitment, +}; +use marketplace_builder_shared::testing::constants::{ + TEST_CHANNEL_BUFFER_SIZE, TEST_MAX_TX_NUM, TEST_NUM_CONSENSUS_RETRIES, + TEST_NUM_NODES_IN_VID_COMPUTATION, +}; +use marketplace_builder_shared::{ + block::BuilderStateId, testing::constants::TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, +}; +use marketplace_builder_shared::{ + block::ParentBlockReferences, testing::constants::TEST_PROTOCOL_MAX_BLOCK_SIZE, +}; +use sha2::{Digest, Sha256}; + +type TestSetup = ( + ProxyGlobalState, + async_broadcast::Sender>, + async_broadcast::Sender>, + async_broadcast::Sender>, + async_broadcast::Sender>>, +); + +/// [`setup_builder_for_test`] sets up a test environment for the builder state. 
+/// It returns a tuple containing the proxy global state, the sender for decide +/// messages, the sender for data availability proposals, +pub fn setup_builder_for_test() -> TestSetup { + let (req_sender, req_receiver) = broadcast(TEST_CHANNEL_BUFFER_SIZE); + let (tx_sender, tx_receiver) = broadcast(TEST_CHANNEL_BUFFER_SIZE); + + let parent_commitment = vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION); + let bootstrap_builder_state_id = BuilderStateId:: { + parent_commitment, + parent_view: ViewNumber::genesis(), + }; + + let global_state = Arc::new(RwLock::new(GlobalState::new( + req_sender, + tx_sender.clone(), + bootstrap_builder_state_id.parent_commitment, + bootstrap_builder_state_id.parent_view, + bootstrap_builder_state_id.parent_view, + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + TEST_NUM_NODES_IN_VID_COMPUTATION, + TEST_MAX_TX_NUM, + ))); + + let max_api_duration = Duration::from_millis(100); + + let proxy_global_state = ProxyGlobalState::new( + global_state.clone(), + BLSPubKey::generated_from_seed_indexed([1; 32], 0), + max_api_duration, + ); + + let (decide_sender, decide_receiver) = broadcast(TEST_CHANNEL_BUFFER_SIZE); + let (da_proposal_sender, da_proposal_receiver) = broadcast(TEST_CHANNEL_BUFFER_SIZE); + let (quorum_proposal_sender, quorum_proposal_receiver) = broadcast(TEST_CHANNEL_BUFFER_SIZE); + let bootstrap_builder_state = BuilderState::::new( + ParentBlockReferences { + vid_commitment: parent_commitment, + view_number: ViewNumber::genesis(), + leaf_commit: Commitment::from_raw([0; 32]), + builder_commitment: BuilderCommitment::from_bytes([0; 32]), + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + decide_receiver, + da_proposal_receiver, + quorum_proposal_receiver, + req_receiver, + tx_receiver, + Default::default(), + global_state.clone(), + Duration::from_millis(40), + 1, + Default::default(), + Duration::from_secs(1), + Default::default(), + ); + + bootstrap_builder_state.event_loop(); + + ( + proxy_global_state, + decide_sender, + da_proposal_sender, + quorum_proposal_sender, + tx_sender, + ) +} + +/// [`process_available_blocks_round`] processes available rounds for a given +/// round. It returns the number of attempts made to get the available blocks +/// and the result of the available blocks. +/// +/// By default Consensus will retry 3-4 times to get available blocks from the +/// Builder. +pub async fn process_available_blocks_round( + proxy_global_state: &ProxyGlobalState, + builder_state_id: BuilderStateId, + round: u64, +) -> ( + usize, + Result>, BuildError>, +) { + let (leader_pub, leader_priv) = BLSPubKey::generated_from_seed_indexed([0; 32], round); + + let current_commit_signature = ::sign( + &leader_priv, + builder_state_id.parent_commitment.as_ref(), + ) + .unwrap(); + + // Simulate Consensus retries + + let mut attempt = 0; + loop { + attempt += 1; + + let available_blocks_result = proxy_global_state + .available_blocks( + &builder_state_id.parent_commitment, + builder_state_id.parent_view.u64(), + leader_pub, + ¤t_commit_signature, + ) + .await; + + if available_blocks_result.is_ok() { + return (attempt, available_blocks_result); + } + + if attempt >= TEST_NUM_CONSENSUS_RETRIES { + return (attempt, available_blocks_result); + } + } +} + +/// [`progress_round_with_available_block_info`] is a helper function that +/// progresses the round with the information returned from a call to +/// [`process_available_blocks_round`]. 
This function simulates decide events +/// if the next call to [`ProxyGlobalState::available_blocks`] returns something +/// successfully rather than an error. +/// +/// This is the workflow that happens if the builder has a block to propose, +/// and the block is included by consensus. +pub async fn progress_round_with_available_block_info( + proxy_global_state: &ProxyGlobalState, + available_block_info: AvailableBlockInfo, + builder_state_id: BuilderStateId, + round: u64, + da_proposal_sender: &Sender>, + quorum_proposal_sender: &Sender>, +) -> BuilderStateId { + let (leader_pub, leader_priv) = BLSPubKey::generated_from_seed_indexed([0; 32], round); + + let signed_parent_commitment = + ::sign(&leader_priv, available_block_info.block_hash.as_ref()) + .unwrap(); + + let claim_block_result = proxy_global_state + .claim_block( + &available_block_info.block_hash, + builder_state_id.parent_view.u64(), + leader_pub, + &signed_parent_commitment, + ) + .await + .unwrap_or_else(|_| panic!("claim block should succeed for round {round}")); + + let _claim_block_header_result = proxy_global_state + .claim_block_header_input( + &available_block_info.block_hash, + builder_state_id.parent_view.u64(), + leader_pub, + &signed_parent_commitment, + ) + .await + .unwrap_or_else(|_| panic!("claim block header input should succeed for round {round}")); + + progress_round_with_transactions( + builder_state_id, + claim_block_result.block_payload.transactions, + round, + da_proposal_sender, + quorum_proposal_sender, + ) + .await +} + +/// [`progress_round_without_available_block_info`] is a helper function that +/// progresses the round without any available block information. +/// +/// This is the workflow that happens if the builder does not have a block to +/// propose, and consensus must continue to progress without a block built by +/// any builder. +pub async fn progress_round_without_available_block_info( + builder_state_id: BuilderStateId, + round: u64, + da_proposal_sender: &Sender>, + quorum_proposal_sender: &Sender>, +) -> BuilderStateId { + progress_round_with_transactions( + builder_state_id, + vec![], + round, + da_proposal_sender, + quorum_proposal_sender, + ) + .await +} + +/// [`progress_round_with_transactions`] is a helper function that progress +/// consensus with the given list of transactions. +/// +/// This function is used by [`progress_round_without_available_block_info`] and +/// by [`progress_round_with_available_block_info`] to progress the round with +/// the given transactions. 
+async fn progress_round_with_transactions( + builder_state_id: BuilderStateId, + transactions: Vec, + round: u64, + da_proposal_sender: &Sender>, + quorum_proposal_sender: &Sender>, +) -> BuilderStateId { + let (leader_pub, leader_priv) = BLSPubKey::generated_from_seed_indexed([0; 32], round); + let encoded_transactions = TestTransaction::encode(&transactions); + let next_view = builder_state_id.parent_view + 1; + + // Create and send the DA Proposals and Quorum Proposals + { + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let da_signature = + ::SignatureKey::sign( + &leader_priv, + &encoded_transactions_hash, + ) + .expect("should sign encoded transactions hash successfully"); + + let metadata = TestMetadata { + num_transactions: transactions.len() as u64, + }; + + da_proposal_sender + .broadcast(MessageType::DaProposalMessage(DaProposalMessage { + proposal: Arc::new(Proposal { + data: DaProposal:: { + encoded_transactions: encoded_transactions.clone().into(), + metadata, + view_number: next_view, + }, + signature: da_signature, + _pd: Default::default(), + }), + sender: leader_pub, + })) + .await + .expect("should broadcast DA Proposal successfully"); + + let payload_commitment = + vid_commitment(&encoded_transactions, TEST_NUM_NODES_IN_VID_COMPUTATION); + + let (block_payload, metadata) = + >::from_transactions( + transactions, + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + .unwrap(); + + let builder_commitment = >::builder_commitment( + &block_payload, + &metadata, + ); + + let block_header = TestBlockHeader { + block_number: round, + payload_commitment, + builder_commitment, + timestamp: round, + metadata, + random: 0, + }; + + let qc_proposal = QuorumProposal:: { + block_header, + view_number: next_view, + justify_qc: QuorumCertificate::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await, + upgrade_certificate: None, + proposal_certificate: None, + }; + + let payload_vid_commitment = + >::payload_commitment( + &qc_proposal.block_header, + ); + + let qc_signature = ::SignatureKey::sign( + &leader_priv, + payload_vid_commitment.as_ref(), + ).expect("Failed to sign payload commitment while preparing QC proposal"); + + quorum_proposal_sender + .broadcast(MessageType::QuorumProposalMessage(QuorumProposalMessage { + proposal: Arc::new(Proposal { + data: qc_proposal.clone(), + signature: qc_signature, + _pd: Default::default(), + }), + sender: leader_pub, + })) + .await + .expect("should broadcast QC Proposal successfully"); + + BuilderStateId { + parent_commitment: payload_vid_commitment, + parent_view: next_view, + } + } +} + +/// [test_empty_block_rate] is a test to ensure that if we don't have any +/// transactions being submitted, that the builder will continue it's current +/// behavior of not proposing empty blocks. +/// +/// |> Note: this test simulates how consensus interacts with the Builder in a +/// |> very basic way. When consensus asks for available blocks, and the +/// |> Builder returns an error that indicates that it does not have any blocks +/// |> to propose, consensus will retry a few times before giving up. As a +/// |> result the number of times that consensus has to ask the Builder for +/// |> block is an integral part of this test. 
+#[tokio::test] +async fn test_empty_block_rate() { + let (proxy_global_state, _, da_proposal_sender, quorum_proposal_sender, _) = + setup_builder_for_test(); + + let mut current_builder_state_id = BuilderStateId:: { + parent_commitment: vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::genesis(), + }; + + for round in 0..10 { + let (attempts, available_available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + + assert_eq!( + attempts, TEST_NUM_CONSENSUS_RETRIES, + "Consensus should retry {TEST_NUM_CONSENSUS_RETRIES} times to get available blocks" + ); + assert!(available_available_blocks_result.is_err()); + + current_builder_state_id = progress_round_without_available_block_info( + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } +} + +/// [test_eager_block_rate] is a test that ensures that the builder will propose +/// empty blocks, if consensus indicates a proposal included transactions. +/// +/// It checks initially that it does not propose any empty blocks in round 0. +/// It checks that it proposes a block with transactions in round 1, which +/// gets included by consensus. +/// It then checks that the next `allow_empty_block_period` rounds return empty +/// blocks without the need to retry. +/// It then checks that the remaining round up to 9 will not propose any empty +/// blocks. +/// +/// |> Note: this test simulates how consensus interacts with the Builder in a +/// |> very basic way. When consensus asks for available blocks, and the +/// |> Builder returns an error that indicates that it does not have any blocks +/// |> to propose, consensus will retry a few times before giving up. As a +/// |> result the number of times that consensus has to ask the Builder for +/// |> block is an integral part of this test. 
+#[tokio::test] +async fn test_eager_block_rate() { + let (proxy_global_state, _, da_proposal_sender, quorum_proposal_sender, _) = + setup_builder_for_test(); + + let mut current_builder_state_id = BuilderStateId:: { + parent_commitment: vid_commitment(&[], TEST_NUM_NODES_IN_VID_COMPUTATION), + parent_view: ViewNumber::genesis(), + }; + + // Round 0 + { + let round = 0; + let (attempts, available_available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + + assert_eq!( + attempts, TEST_NUM_CONSENSUS_RETRIES, + "Consensus should retry {TEST_NUM_CONSENSUS_RETRIES} times to get available blocks for round {round}" + ); + + assert!( + available_available_blocks_result.is_err(), + "builder should not propose empty blocks for round {round}" + ); + + current_builder_state_id = progress_round_without_available_block_info( + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } + + // Round 1, submit a single transaction, and advance the round + { + proxy_global_state + .submit_txns(vec![TestTransaction::new(vec![1])]) + .await + .expect("should submit transaction without issue"); + + let round = 1; + + let (attempts, available_available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + + assert_eq!( + attempts, 1, + "Consensus should not have needed to retry at all for round {round}" + ); + + assert!( + available_available_blocks_result.is_ok(), + "builder should be proposing empty blocks for round {round}" + ); + + current_builder_state_id = progress_round_with_available_block_info( + &proxy_global_state, + available_available_blocks_result.unwrap()[0].clone(), + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } + + // rounds 2 through 2 + ALLOW_EMPTY_BLOCK_PERIOD - 1 should propose empty + // blocks. 
+ for round in 2..(2 + ALLOW_EMPTY_BLOCK_PERIOD) { + let (attempts, available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + + assert_eq!( + attempts, 1, + "Consensus should not have needed to retry at all for round {round}" + ); + + assert!( + available_blocks_result.is_ok(), + "builder should be proposing empty blocks for round {round}" + ); + + let available_blocks_result = available_blocks_result.unwrap(); + + assert_eq!( + available_blocks_result[0].block_size, 0, + "the block should be empty for round {round}" + ); + + current_builder_state_id = progress_round_with_available_block_info( + &proxy_global_state, + available_blocks_result[0].clone(), + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } + + // rounds 2 + ALLOW_EMPTY_BLOCK_PERIOD through 9 should not propose empty + for round in (2 + ALLOW_EMPTY_BLOCK_PERIOD)..10 { + let (attempts, available_blocks_result) = process_available_blocks_round( + &proxy_global_state, + current_builder_state_id.clone(), + round, + ) + .await; + + assert_eq!( + attempts, TEST_NUM_CONSENSUS_RETRIES, + "Consensus should have retries {TEST_NUM_CONSENSUS_RETRIES} times for round {round}" + ); + assert!(available_blocks_result.is_err()); + + current_builder_state_id = progress_round_without_available_block_info( + current_builder_state_id, + round, + &da_proposal_sender, + &quorum_proposal_sender, + ) + .await; + } +} diff --git a/crates/legacy/src/old/testing/mod.rs b/crates/legacy/src/old/testing/mod.rs new file mode 100644 index 00000000..5633788f --- /dev/null +++ b/crates/legacy/src/old/testing/mod.rs @@ -0,0 +1,261 @@ +use std::{collections::VecDeque, marker::PhantomData}; + +use crate::{ + builder_state::{ + BuilderState, DAProposalInfo, DaProposalMessage, MessageType, QuorumProposalMessage, + }, + implementation::LegacyCommit, + service::ReceivedTransaction, +}; +use async_broadcast::broadcast; +use async_broadcast::Sender as BroadcastSender; +use hotshot::{ + traits::BlockPayload, + types::{BLSPubKey, SignatureKey}, +}; +use hotshot_types::{ + data::{DaProposal, Leaf, QuorumProposal, ViewNumber}, + message::Proposal, + simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_vote::QuorumData, + traits::{block_contents::vid_commitment, node_implementation::ConsensusTime}, + utils::BuilderCommitment, +}; + +use hotshot_example_types::{ + block_types::{TestBlockHeader, TestBlockPayload, TestMetadata, TestTransaction}, + node_types::{TestTypes, TestVersions}, + state_types::{TestInstanceState, TestValidatedState}, +}; +use sha2::{Digest, Sha256}; + +use crate::service::GlobalState; +use async_lock::RwLock; +use committable::{Commitment, CommitmentBoundsArkless, Committable}; +use marketplace_builder_shared::{ + block::{BuilderStateId, ParentBlockReferences}, + testing::constants::{ + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, TEST_MAX_TX_NUM, TEST_PROTOCOL_MAX_BLOCK_SIZE, + }, +}; +use std::sync::Arc; +use std::time::Duration; + +mod basic_test; +pub mod finalization_test; + +pub async fn create_builder_state( + channel_capacity: usize, + num_storage_nodes: usize, +) -> ( + BroadcastSender>, + Arc>>, + BuilderState, +) { + // set up the broadcast channels + let (bootstrap_sender, bootstrap_receiver) = + broadcast::>(channel_capacity); + let (_decide_sender, decide_receiver) = broadcast::>(channel_capacity); + let (_da_sender, da_receiver) = broadcast::>(channel_capacity); + let 
(_quorum_sender, quorum_proposal_receiver) = + broadcast::>(channel_capacity); + let (senders, _receivers) = broadcast::>(channel_capacity); + let (tx_sender, tx_receiver) = + broadcast::>>(channel_capacity); + + let genesis_vid_commitment = vid_commitment(&[], num_storage_nodes); + let genesis_builder_commitment = BuilderCommitment::from_bytes([]); + + // instantiate the global state + let global_state = Arc::new(RwLock::new(GlobalState::::new( + bootstrap_sender, + tx_sender.clone(), + genesis_vid_commitment, + ViewNumber::genesis(), + ViewNumber::genesis(), + TEST_MAX_BLOCK_SIZE_INCREMENT_PERIOD, + TEST_PROTOCOL_MAX_BLOCK_SIZE, + num_storage_nodes, + TEST_MAX_TX_NUM, + ))); + + // instantiate the bootstrap builder state + let builder_state = BuilderState::new( + ParentBlockReferences { + view_number: ViewNumber::new(0), + vid_commitment: genesis_vid_commitment, + leaf_commit: Commitment::>::default_commitment_no_preimage(), + builder_commitment: genesis_builder_commitment, + // Unused in old legacy builder: + last_nonempty_view: None, + tx_count: 0, + }, + decide_receiver.clone(), + da_receiver.clone(), + quorum_proposal_receiver.clone(), + bootstrap_receiver, + tx_receiver, + VecDeque::new(), + global_state.clone(), + Duration::from_millis(100), + 1, + Arc::new(TestInstanceState::default()), + Duration::from_millis(100), + Arc::new(TestValidatedState::default()), + ); + + (senders, global_state, builder_state) +} + +/// get transactions submitted in previous rounds, [] for genesis +/// and simulate the block built from those +pub async fn calc_proposal_msg( + num_storage_nodes: usize, + round: usize, + prev_quorum_proposal: Option>, + transactions: Vec, +) -> ( + QuorumProposal, + QuorumProposalMessage, + DaProposalMessage, + BuilderStateId, +) { + // get transactions submitted in previous rounds, [] for genesis + // and simulate the block built from those + let num_transactions = transactions.len() as u64; + let encoded_transactions = TestTransaction::encode(&transactions); + let block_payload = TestBlockPayload { transactions }; + let block_vid_commitment = vid_commitment(&encoded_transactions, num_storage_nodes); + let metadata = TestMetadata { num_transactions }; + let block_builder_commitment = + >::builder_commitment( + &block_payload, + &metadata, + ); + + // generate key for leader of this round + let seed = [round as u8; 32]; + let (pub_key, private_key) = BLSPubKey::generated_from_seed_indexed(seed, round as u64); + + // Prepare the DA proposal message + let da_proposal_message: DaProposalMessage = { + let da_proposal = DaProposal { + encoded_transactions: encoded_transactions.clone().into(), + metadata: TestMetadata { + num_transactions: encoded_transactions.len() as u64, + }, + view_number: ViewNumber::new(round as u64), + }; + let encoded_transactions_hash = Sha256::digest(&encoded_transactions); + let da_signature = + ::SignatureKey::sign( + &private_key, + &encoded_transactions_hash, + ) + .expect("Failed to sign encoded tx hash while preparing da proposal"); + + DaProposalMessage:: { + proposal: Arc::new(Proposal { + data: da_proposal, + signature: da_signature.clone(), + _pd: PhantomData, + }), + sender: pub_key, + } + }; + + let block_header = TestBlockHeader { + block_number: round as u64, + payload_commitment: block_vid_commitment, + builder_commitment: block_builder_commitment, + timestamp: round as u64, + metadata, + random: 1, // arbitrary + }; + + let justify_qc = match prev_quorum_proposal.as_ref() { + None => { + QuorumCertificate::::genesis::( + 
&TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } + Some(prev_proposal) => { + let prev_justify_qc = &prev_proposal.justify_qc; + let quorum_data = QuorumData:: { + leaf_commit: Leaf::from_quorum_proposal(prev_proposal).legacy_commit(), + }; + + // form a justify qc + SimpleCertificate::, SuccessThreshold>::new( + quorum_data.clone(), + quorum_data.commit(), + prev_proposal.view_number, + prev_justify_qc.signatures.clone(), + PhantomData, + ) + } + }; + + tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc); + + let quorum_proposal = QuorumProposal:: { + block_header, + view_number: ViewNumber::new(round as u64), + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let quorum_signature = + ::SignatureKey::sign( + &private_key, + block_vid_commitment.as_ref(), + ) + .expect("Failed to sign payload commitment while preparing Quorum proposal"); + + let quorum_proposal_msg = QuorumProposalMessage:: { + proposal: Arc::new(Proposal { + data: quorum_proposal.clone(), + signature: quorum_signature, + _pd: PhantomData, + }), + sender: pub_key, + }; + + let builder_state_id = BuilderStateId { + parent_commitment: block_vid_commitment, + parent_view: ViewNumber::new(round as u64), + }; + ( + quorum_proposal, + quorum_proposal_msg, + da_proposal_message, + builder_state_id, + ) +} + +pub async fn calc_builder_commitment( + da_proposal_message: DaProposalMessage, +) -> (BuilderCommitment, DAProposalInfo) { + // If the respective builder state exists to handle the request + let proposal = da_proposal_message.proposal.clone(); + // get the view number and encoded txns from the da_proposal_data + let view_number = proposal.data.view_number; + let encoded_txns = &proposal.data.encoded_transactions; + + let metadata = &proposal.data.metadata; + // form a block payload from the encoded transactions + let block_payload = + >::from_bytes(encoded_txns, metadata); + // get the builder commitment from the block payload + let payload_builder_commitment = + >::builder_commitment(&block_payload, metadata); + // form the DA proposal info + let da_proposal_info = DAProposalInfo { + view_number, + proposal, + }; + (payload_builder_commitment, da_proposal_info) +} diff --git a/crates/legacy/src/block_size_limits.rs b/crates/legacy/src/refactored/block_size_limits.rs similarity index 100% rename from crates/legacy/src/block_size_limits.rs rename to crates/legacy/src/refactored/block_size_limits.rs diff --git a/crates/legacy/src/block_store.rs b/crates/legacy/src/refactored/block_store.rs similarity index 100% rename from crates/legacy/src/block_store.rs rename to crates/legacy/src/refactored/block_store.rs diff --git a/crates/legacy/src/refactored/lib.rs b/crates/legacy/src/refactored/lib.rs new file mode 100644 index 00000000..67d926af --- /dev/null +++ b/crates/legacy/src/refactored/lib.rs @@ -0,0 +1,18 @@ +//! Builder Phase 1 +//! It mainly provides three API services to hotshot proposers: +//! 1. Serves a proposer(leader)'s request to provide blocks information +//! 2. Serves a proposer(leader)'s request to provide the full blocks information +//! 3. Serves a proposer(leader)'s request to provide the block header information +//! +//! It also provides one API service to external users: +//! 1. 
Serves a user's request to submit a private transaction +#![cfg_attr(coverage_nightly, feature(coverage_attribute))] + +pub mod block_size_limits; +pub mod block_store; +pub mod service; + +// tracking the testing +#[cfg(test)] +#[cfg_attr(coverage_nightly, coverage(off))] +pub mod testing; diff --git a/crates/legacy/src/service.rs b/crates/legacy/src/refactored/service.rs similarity index 100% rename from crates/legacy/src/service.rs rename to crates/legacy/src/refactored/service.rs diff --git a/crates/legacy/src/testing/basic.rs b/crates/legacy/src/refactored/testing/basic.rs similarity index 100% rename from crates/legacy/src/testing/basic.rs rename to crates/legacy/src/refactored/testing/basic.rs diff --git a/crates/legacy/src/testing/block_size.rs b/crates/legacy/src/refactored/testing/block_size.rs similarity index 100% rename from crates/legacy/src/testing/block_size.rs rename to crates/legacy/src/refactored/testing/block_size.rs diff --git a/crates/legacy/src/testing/finalization.rs b/crates/legacy/src/refactored/testing/finalization.rs similarity index 100% rename from crates/legacy/src/testing/finalization.rs rename to crates/legacy/src/refactored/testing/finalization.rs diff --git a/crates/legacy/src/testing/integration.rs b/crates/legacy/src/refactored/testing/integration.rs similarity index 100% rename from crates/legacy/src/testing/integration.rs rename to crates/legacy/src/refactored/testing/integration.rs diff --git a/crates/legacy/src/testing/mod.rs b/crates/legacy/src/refactored/testing/mod.rs similarity index 100% rename from crates/legacy/src/testing/mod.rs rename to crates/legacy/src/refactored/testing/mod.rs diff --git a/crates/shared/src/testing/constants.rs b/crates/shared/src/testing/constants.rs index 5a5e7a40..15128a3f 100644 --- a/crates/shared/src/testing/constants.rs +++ b/crates/shared/src/testing/constants.rs @@ -28,6 +28,8 @@ pub const TEST_CHANNEL_BUFFER_SIZE: usize = 81920; /// This is an arbitrary default value for testing. pub const TEST_TX_STATUS_CACHE_CAPACITY: usize = 10_000_000; +pub const TEST_MAX_TX_NUM: usize = TEST_TX_STATUS_CACHE_CAPACITY; + /// Governs the included transaction GC period used in tests. /// This is an arbitrary default value for testing. pub const TEST_INCLUDED_TX_GC_PERIOD: Duration = Duration::from_secs(1);
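
The file moves above split the legacy crate into two parallel implementation trees, crates/legacy/src/old/ and crates/legacy/src/refactored/, only one of which is compiled at a time. The sketch below is an illustrative, self-contained reduction of that pattern, not the actual contents of crates/legacy/src/lib.rs: it assumes the cfg name `legacy_builder_refactored` and uses placeholder module bodies purely to show how a compile-time cfg can select which implementation a crate re-exports.

// Hypothetical, minimal sketch of cfg-gating two implementations behind one API.
// The cfg name `legacy_builder_refactored` and the module contents are assumptions
// for illustration only; they are not the crate's real code.

#[cfg(not(legacy_builder_refactored))]
mod old {
    // Stand-in for the pre-refactor implementation tree.
    pub fn builder_impl() -> &'static str {
        "old legacy builder"
    }
}

#[cfg(legacy_builder_refactored)]
mod refactored {
    // Stand-in for the refactored implementation tree.
    pub fn builder_impl() -> &'static str {
        "refactored legacy builder"
    }
}

// Exactly one of the two branches is compiled, so the re-export is unambiguous.
#[cfg(not(legacy_builder_refactored))]
pub use old::builder_impl;
#[cfg(legacy_builder_refactored)]
pub use refactored::builder_impl;

fn main() {
    // Built normally this prints the old variant; built with
    // `RUSTFLAGS="--cfg legacy_builder_refactored" cargo run` it prints the
    // refactored one. Recent compilers may emit an `unexpected_cfgs` warning
    // unless the cfg name is registered via check-cfg.
    println!("{}", builder_impl());
}

Selecting an implementation is then a pure build-time decision passed to rustc via `--cfg`, so both trees can stay buildable and testable from the same source without any runtime feature flag.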